[SK_BUFF]: Introduce skb_copy_from_linear_data{_offset}
To clearly state the intent of copying from linear sk_buffs, _offset being an overly long variant but interesting for the sake of saving some bytes. Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
parent
2a123b86e2
commit
d626f62b11
|
@ -566,7 +566,8 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
msg->version = XPNET_VERSION_EMBED;
|
||||
dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
|
||||
&msg->data, skb->data, (size_t) embedded_bytes);
|
||||
memcpy(&msg->data, skb->data, (size_t) embedded_bytes);
|
||||
skb_copy_from_linear_data(skb, &msg->data,
|
||||
(size_t)embedded_bytes);
|
||||
} else {
|
||||
msg->version = XPNET_VERSION;
|
||||
}
|
||||
|
|
|
@ -221,7 +221,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
|
|||
hdr->vpi = htons(vcc->vpi);
|
||||
hdr->vci = htons(vcc->vci);
|
||||
hdr->length = htonl(skb->len);
|
||||
memcpy(skb_put(new_skb,skb->len),skb->data,skb->len);
|
||||
skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
|
||||
if (vcc->pop) vcc->pop(vcc,skb);
|
||||
else dev_kfree_skb(skb);
|
||||
out_vcc->push(out_vcc,new_skb);
|
||||
|
@ -310,7 +310,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
|
|||
goto done;
|
||||
}
|
||||
__net_timestamp(new_skb);
|
||||
memcpy(skb_put(new_skb,skb->len),skb->data,skb->len);
|
||||
skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
|
||||
out_vcc->push(out_vcc,new_skb);
|
||||
atomic_inc(&vcc->stats->tx);
|
||||
atomic_inc(&out_vcc->stats->rx);
|
||||
|
|
|
@ -2395,7 +2395,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
|
|||
skb->destructor = ns_lb_destructor;
|
||||
#endif /* NS_USE_DESTRUCTORS */
|
||||
skb_push(skb, NS_SMBUFSIZE);
|
||||
memcpy(skb->data, sb->data, NS_SMBUFSIZE);
|
||||
skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE);
|
||||
skb_put(skb, len - NS_SMBUFSIZE);
|
||||
ATM_SKB(skb)->vcc = vcc;
|
||||
__net_timestamp(skb);
|
||||
|
@ -2479,7 +2479,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
|
|||
{
|
||||
/* Copy the small buffer to the huge buffer */
|
||||
sb = (struct sk_buff *) iov->iov_base;
|
||||
memcpy(hb->data, sb->data, iov->iov_len);
|
||||
skb_copy_from_linear_data(sb, hb->data, iov->iov_len);
|
||||
skb_put(hb, iov->iov_len);
|
||||
remaining = len - iov->iov_len;
|
||||
iov++;
|
||||
|
@ -2491,7 +2491,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
|
|||
{
|
||||
lb = (struct sk_buff *) iov->iov_base;
|
||||
tocopy = min_t(int, remaining, iov->iov_len);
|
||||
memcpy(skb_tail_pointer(hb), lb->data, tocopy);
|
||||
skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy);
|
||||
skb_put(hb, tocopy);
|
||||
iov++;
|
||||
remaining -= tocopy;
|
||||
|
|
|
@ -527,7 +527,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
|
|||
buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 0 : size;
|
||||
|
||||
memcpy(skb_put(nskb, 3), buf, 3);
|
||||
memcpy(skb_put(nskb, size), skb->data + sent, size);
|
||||
skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size);
|
||||
|
||||
sent += size;
|
||||
count -= size;
|
||||
|
|
|
@ -231,7 +231,7 @@ static void bpa10x_wakeup(struct bpa10x_data *data)
|
|||
cr = (struct usb_ctrlrequest *) urb->setup_packet;
|
||||
cr->wLength = __cpu_to_le16(skb->len);
|
||||
|
||||
memcpy(urb->transfer_buffer, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len);
|
||||
urb->transfer_buffer_length = skb->len;
|
||||
|
||||
err = usb_submit_urb(urb, GFP_ATOMIC);
|
||||
|
@ -250,7 +250,7 @@ static void bpa10x_wakeup(struct bpa10x_data *data)
|
|||
skb = skb_dequeue(&data->tx_queue);
|
||||
|
||||
if (skb) {
|
||||
memcpy(urb->transfer_buffer, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len);
|
||||
urb->transfer_buffer_length = skb->len;
|
||||
|
||||
err = usb_submit_urb(urb, GFP_ATOMIC);
|
||||
|
|
|
@ -425,7 +425,7 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
|
|||
return -ENOMEM;
|
||||
|
||||
skb_reserve(s, NSHL);
|
||||
memcpy(skb_put(s, skb->len), skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len);
|
||||
if (skb->len & 0x0001)
|
||||
*skb_put(s, 1) = 0; /* PAD */
|
||||
|
||||
|
|
|
@ -4169,7 +4169,7 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
netif_stop_queue(dev);
|
||||
|
||||
/* copy data to device buffers */
|
||||
memcpy(info->tx_buf, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, info->tx_buf, skb->len);
|
||||
info->tx_get = 0;
|
||||
info->tx_put = info->tx_count = skb->len;
|
||||
|
||||
|
|
|
@ -821,7 +821,8 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
|
|||
/*
|
||||
* copy the new data into our accumulation buffer.
|
||||
*/
|
||||
memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
|
||||
skb->len);
|
||||
ep->mpa_pkt_len += skb->len;
|
||||
|
||||
/*
|
||||
|
@ -940,7 +941,8 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
|
|||
/*
|
||||
* Copy the new data into our accumulation buffer.
|
||||
*/
|
||||
memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
|
||||
skb->len);
|
||||
ep->mpa_pkt_len += skb->len;
|
||||
|
||||
/*
|
||||
|
@ -1619,7 +1621,8 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
|
|||
PDBG("%s ep %p\n", __FUNCTION__, ep);
|
||||
skb_pull(skb, sizeof(struct cpl_rdma_terminate));
|
||||
PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
|
||||
memcpy(ep->com.qp->attr.terminate_buffer, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
|
||||
skb->len);
|
||||
ep->com.qp->attr.terminate_msg_len = skb->len;
|
||||
ep->com.qp->attr.is_terminate_local = 0;
|
||||
return CPL_RET_BUF_DONE;
|
||||
|
|
|
@ -442,7 +442,7 @@ act2000_sendbuf(act2000_card *card, int channel, int ack, struct sk_buff *skb)
|
|||
return 0;
|
||||
}
|
||||
skb_reserve(xmit_skb, 19);
|
||||
memcpy(skb_put(xmit_skb, len), skb->data, len);
|
||||
skb_copy_from_linear_data(skb, skb_put(xmit_skb, len), len);
|
||||
} else {
|
||||
xmit_skb = skb_clone(skb, GFP_ATOMIC);
|
||||
if (!xmit_skb) {
|
||||
|
|
|
@ -652,7 +652,7 @@ static int write_modem(struct cardstate *cs)
|
|||
* transmit data
|
||||
*/
|
||||
count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
|
||||
memcpy(ucs->bulk_out_buffer, bcs->tx_skb->data, count);
|
||||
skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
|
||||
skb_pull(bcs->tx_skb, count);
|
||||
atomic_set(&ucs->busy, 1);
|
||||
gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);
|
||||
|
|
|
@ -404,7 +404,8 @@ static void b1dma_dispatch_tx(avmcard *card)
|
|||
printk(KERN_DEBUG "tx: put 0x%x len=%d\n",
|
||||
skb->data[2], txlen);
|
||||
#endif
|
||||
memcpy(dma->sendbuf.dmabuf, skb->data+2, skb->len-2);
|
||||
skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
|
||||
skb->len - 2);
|
||||
}
|
||||
txlen = (txlen + 3) & ~3;
|
||||
|
||||
|
|
|
@ -457,7 +457,8 @@ static void c4_dispatch_tx(avmcard *card)
|
|||
printk(KERN_DEBUG "%s: tx put 0x%x len=%d\n",
|
||||
card->name, skb->data[2], txlen);
|
||||
#endif
|
||||
memcpy(dma->sendbuf.dmabuf, skb->data+2, skb->len-2);
|
||||
skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf,
|
||||
skb->len - 2);
|
||||
}
|
||||
txlen = (txlen + 3) & ~3;
|
||||
|
||||
|
|
|
@ -254,14 +254,16 @@ write_modem(struct BCState *bcs) {
|
|||
count = len;
|
||||
if (count > MAX_MODEM_BUF - fp) {
|
||||
count = MAX_MODEM_BUF - fp;
|
||||
memcpy(cs->hw.elsa.transbuf + fp, bcs->tx_skb->data, count);
|
||||
skb_copy_from_linear_data(bcs->tx_skb,
|
||||
cs->hw.elsa.transbuf + fp, count);
|
||||
skb_pull(bcs->tx_skb, count);
|
||||
cs->hw.elsa.transcnt += count;
|
||||
ret = count;
|
||||
count = len - count;
|
||||
fp = 0;
|
||||
}
|
||||
memcpy((cs->hw.elsa.transbuf + fp), bcs->tx_skb->data, count);
|
||||
skb_copy_from_linear_data(bcs->tx_skb,
|
||||
cs->hw.elsa.transbuf + fp, count);
|
||||
skb_pull(bcs->tx_skb, count);
|
||||
cs->hw.elsa.transcnt += count;
|
||||
ret += count;
|
||||
|
|
|
@ -1293,7 +1293,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
|
|||
oskb = skb;
|
||||
skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
|
||||
memcpy(skb_put(skb, i), header, i);
|
||||
memcpy(skb_put(skb, oskb->len), oskb->data, oskb->len);
|
||||
skb_copy_from_linear_data(oskb,
|
||||
skb_put(skb, oskb->len), oskb->len);
|
||||
dev_kfree_skb(oskb);
|
||||
}
|
||||
st->l2.l2l1(st, PH_PULL | INDICATION, skb);
|
||||
|
|
|
@ -398,7 +398,7 @@ static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
|
|||
_len = CAPIMSG_LEN(skb->data);
|
||||
if (_len > 22) {
|
||||
_len2 = _len - 22;
|
||||
memcpy(msghead, skb->data, 22);
|
||||
skb_copy_from_linear_data(skb, msghead, 22);
|
||||
memcpy(skb->data + _len2, msghead, 22);
|
||||
skb_pull(skb, _len2);
|
||||
CAPIMSG_SETLEN(skb->data, 22);
|
||||
|
|
|
@ -113,7 +113,8 @@ hysdn_sched_tx(hysdn_card *card, unsigned char *buf,
|
|||
(skb = hysdn_tx_netget(card)) != NULL)
|
||||
{
|
||||
if (skb->len <= maxlen) {
|
||||
memcpy(buf, skb->data, skb->len); /* copy the packet to the buffer */
|
||||
/* copy the packet to the buffer */
|
||||
skb_copy_from_linear_data(skb, buf, skb->len);
|
||||
*len = skb->len;
|
||||
*chan = CHAN_NDIS_DATA;
|
||||
card->net_tx_busy = 1; /* we are busy sending network data */
|
||||
|
@ -126,7 +127,7 @@ hysdn_sched_tx(hysdn_card *card, unsigned char *buf,
|
|||
((skb = hycapi_tx_capiget(card)) != NULL) )
|
||||
{
|
||||
if (skb->len <= maxlen) {
|
||||
memcpy(buf, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, buf, skb->len);
|
||||
*len = skb->len;
|
||||
*chan = CHAN_CAPI;
|
||||
hycapi_tx_capiack(card);
|
||||
|
|
|
@ -829,7 +829,7 @@ isdn_readbchan(int di, int channel, u_char * buf, u_char * fp, int len, wait_que
|
|||
dflag = 0;
|
||||
}
|
||||
count_put = count_pull;
|
||||
memcpy(cp, skb->data, count_put);
|
||||
skb_copy_from_linear_data(skb, cp, count_put);
|
||||
cp += count_put;
|
||||
len -= count_put;
|
||||
#ifdef CONFIG_ISDN_AUDIO
|
||||
|
|
|
@ -1100,7 +1100,8 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
|
|||
goto drop_packet;
|
||||
}
|
||||
skb_put(skb, skb_old->len + 128);
|
||||
memcpy(skb->data, skb_old->data, skb_old->len);
|
||||
skb_copy_from_linear_data(skb_old, skb->data,
|
||||
skb_old->len);
|
||||
if (net_dev->local->ppp_slot < 0) {
|
||||
printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
|
||||
__FUNCTION__, net_dev->local->ppp_slot);
|
||||
|
@ -1902,7 +1903,9 @@ void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
|
|||
while( from != to ) {
|
||||
unsigned int len = from->len - MP_HEADER_LEN;
|
||||
|
||||
memcpy(skb_put(skb,len), from->data+MP_HEADER_LEN, len);
|
||||
skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
|
||||
skb_put(skb,len),
|
||||
len);
|
||||
frag = from->next;
|
||||
isdn_ppp_mp_free_skb(mp, from);
|
||||
from = frag;
|
||||
|
|
|
@ -415,7 +415,8 @@ isdnloop_sendbuf(int channel, struct sk_buff *skb, isdnloop_card * card)
|
|||
spin_lock_irqsave(&card->isdnloop_lock, flags);
|
||||
nskb = dev_alloc_skb(skb->len);
|
||||
if (nskb) {
|
||||
memcpy(skb_put(nskb, len), skb->data, len);
|
||||
skb_copy_from_linear_data(skb,
|
||||
skb_put(nskb, len), len);
|
||||
skb_queue_tail(&card->bqueue[channel], nskb);
|
||||
dev_kfree_skb(skb);
|
||||
} else
|
||||
|
|
|
@ -429,8 +429,9 @@ int capi_decode_conn_ind(struct pcbit_chan * chan,
|
|||
if (!(info->data.setup.CallingPN = kmalloc(len - count + 1, GFP_ATOMIC)))
|
||||
return -1;
|
||||
|
||||
memcpy(info->data.setup.CallingPN, skb->data + count + 1,
|
||||
len - count);
|
||||
skb_copy_from_linear_data_offset(skb, count + 1,
|
||||
info->data.setup.CallingPN,
|
||||
len - count);
|
||||
info->data.setup.CallingPN[len - count] = 0;
|
||||
|
||||
}
|
||||
|
@ -457,8 +458,9 @@ int capi_decode_conn_ind(struct pcbit_chan * chan,
|
|||
if (!(info->data.setup.CalledPN = kmalloc(len - count + 1, GFP_ATOMIC)))
|
||||
return -1;
|
||||
|
||||
memcpy(info->data.setup.CalledPN, skb->data + count + 1,
|
||||
len - count);
|
||||
skb_copy_from_linear_data_offset(skb, count + 1,
|
||||
info->data.setup.CalledPN,
|
||||
len - count);
|
||||
info->data.setup.CalledPN[len - count] = 0;
|
||||
|
||||
}
|
||||
|
@ -539,7 +541,7 @@ int capi_decode_conn_actv_ind(struct pcbit_chan * chan, struct sk_buff *skb)
|
|||
|
||||
#ifdef DEBUG
|
||||
if (len > 1 && len < 31) {
|
||||
memcpy(str, skb->data + 2, len - 1);
|
||||
skb_copy_from_linear_data_offset(skb, 2, str, len - 1);
|
||||
str[len] = 0;
|
||||
printk(KERN_DEBUG "Connected Party Number: %s\n", str);
|
||||
}
|
||||
|
|
|
@ -697,7 +697,9 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
|
|||
}
|
||||
else
|
||||
{
|
||||
memcpy(dest_addr, priv->ule_skb->data, ETH_ALEN);
|
||||
skb_copy_from_linear_data(priv->ule_skb,
|
||||
dest_addr,
|
||||
ETH_ALEN);
|
||||
skb_pull(priv->ule_skb, ETH_ALEN);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -932,7 +932,7 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
|
|||
pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
|
||||
priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
|
||||
|
||||
memcpy(skb_put(skb, len), old_skb->data, len);
|
||||
skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
|
||||
|
||||
pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
|
||||
priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
|
||||
|
@ -1093,7 +1093,7 @@ mpt_lan_receive_post_reply(struct net_device *dev,
|
|||
priv->RcvCtl[ctx].dma,
|
||||
priv->RcvCtl[ctx].len,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
memcpy(skb_put(skb, l), old_skb->data, l);
|
||||
skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
|
||||
|
||||
pci_dma_sync_single_for_device(mpt_dev->pcidev,
|
||||
priv->RcvCtl[ctx].dma,
|
||||
|
@ -1122,7 +1122,7 @@ mpt_lan_receive_post_reply(struct net_device *dev,
|
|||
priv->RcvCtl[ctx].len,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
|
||||
memcpy(skb_put(skb, len), old_skb->data, len);
|
||||
skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
|
||||
|
||||
pci_dma_sync_single_for_device(mpt_dev->pcidev,
|
||||
priv->RcvCtl[ctx].dma,
|
||||
|
|
|
@ -1025,7 +1025,7 @@ static int send_packet(struct net_device *dev, struct sk_buff *skb)
|
|||
adapter->current_dma.start_time = jiffies;
|
||||
|
||||
if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
|
||||
memcpy(adapter->dma_buffer, skb->data, nlen);
|
||||
skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
|
||||
memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
|
||||
target = isa_virt_to_bus(adapter->dma_buffer);
|
||||
}
|
||||
|
|
|
@ -1145,7 +1145,7 @@ static int elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
if (len != skb->len)
|
||||
memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
|
||||
memcpy((char *) p->xmit_cbuffs[p->xmit_count], (char *) (skb->data), skb->len);
|
||||
skb_copy_from_linear_data(skb, p->xmit_cbuffs[p->xmit_count], skb->len);
|
||||
|
||||
#if (NUM_XMIT_BUFFS == 1)
|
||||
#ifdef NO_NOPCOMMANDS
|
||||
|
|
|
@ -567,7 +567,7 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
if (skb->len < ETH_ZLEN)
|
||||
memset((char *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
|
||||
memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen);
|
||||
skb_copy_from_linear_data(skb, &ib->tx_buf[entry][0], skblen);
|
||||
|
||||
/* Now, give the packet to the lance */
|
||||
ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
|
||||
|
|
|
@ -598,7 +598,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
|
|||
ib->btx_ring [entry].length = (-len) | 0xf000;
|
||||
ib->btx_ring [entry].misc = 0;
|
||||
|
||||
memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen);
|
||||
skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen);
|
||||
|
||||
/* Clear the slack of the packet, do I need this? */
|
||||
if (len != skblen)
|
||||
|
|
|
@ -273,7 +273,8 @@ static int ack_tx(struct net_device *dev, int acked)
|
|||
/* skb_pull(ackskb, ARC_HDR_SIZE); */
|
||||
|
||||
|
||||
memcpy(ackpkt, lp->outgoing.skb->data, ARC_HDR_SIZE+sizeof(struct arc_cap));
|
||||
skb_copy_from_linear_data(lp->outgoing.skb, ackpkt,
|
||||
ARC_HDR_SIZE + sizeof(struct arc_cap));
|
||||
ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */
|
||||
ackpkt->soft.cap.mes.ack=acked;
|
||||
|
||||
|
|
|
@ -453,7 +453,8 @@ bionet_send_packet(struct sk_buff *skb, struct net_device *dev) {
|
|||
stdma_lock(bionet_intr, NULL);
|
||||
local_irq_restore(flags);
|
||||
if( !STRAM_ADDR(buf+length-1) ) {
|
||||
memcpy(nic_packet->buffer, skb->data, length);
|
||||
skb_copy_from_linear_data(skb, nic_packet->buffer,
|
||||
length);
|
||||
buf = (unsigned long)&((struct nic_pkt_s *)phys_nic_packet)->buffer;
|
||||
}
|
||||
|
||||
|
|
|
@ -717,7 +717,8 @@ pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev) {
|
|||
|
||||
local_irq_restore(flags);
|
||||
if( !STRAM_ADDR(buf+length-1) ) {
|
||||
memcpy(nic_packet->buffer, skb->data, length);
|
||||
skb_copy_from_linear_data(skb, nic_packet->buffer,
|
||||
length);
|
||||
buf = (unsigned long)phys_nic_packet;
|
||||
}
|
||||
|
||||
|
|
|
@ -1125,7 +1125,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
|
|||
}
|
||||
|
||||
pDB = aup->tx_db_inuse[aup->tx_head];
|
||||
memcpy((void *)pDB->vaddr, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
|
||||
if (skb->len < ETH_ZLEN) {
|
||||
for (i=skb->len; i<ETH_ZLEN; i++) {
|
||||
((char *)pDB->vaddr)[i] = 0;
|
||||
|
|
|
@ -828,8 +828,8 @@ static int b44_rx(struct b44 *bp, int budget)
|
|||
skb_reserve(copy_skb, 2);
|
||||
skb_put(copy_skb, len);
|
||||
/* DMA sync done above, copy just the actual packet */
|
||||
memcpy(copy_skb->data, skb->data+bp->rx_offset, len);
|
||||
|
||||
skb_copy_from_linear_data_offset(skb, bp->rx_offset,
|
||||
copy_skb->data, len);
|
||||
skb = copy_skb;
|
||||
}
|
||||
skb->ip_summed = CHECKSUM_NONE;
|
||||
|
@ -1006,7 +1006,8 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
goto err_out;
|
||||
}
|
||||
|
||||
memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, skb_put(bounce_skb, len),
|
||||
skb->len);
|
||||
dev_kfree_skb_any(skb);
|
||||
skb = bounce_skb;
|
||||
}
|
||||
|
|
|
@ -1884,10 +1884,8 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
|
|||
goto reuse_rx;
|
||||
|
||||
/* aligned copy */
|
||||
memcpy(new_skb->data,
|
||||
skb->data + bp->rx_offset - 2,
|
||||
len + 2);
|
||||
|
||||
skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
|
||||
new_skb->data, len + 2);
|
||||
skb_reserve(new_skb, 2);
|
||||
skb_put(new_skb, len);
|
||||
|
||||
|
|
|
@ -2846,8 +2846,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
|
|||
ctrl | TX_DESC_SOF, 0);
|
||||
entry = TX_DESC_NEXT(ring, entry);
|
||||
|
||||
memcpy(tx_tiny_buf(cp, ring, entry), skb->data +
|
||||
len - tabort, tabort);
|
||||
skb_copy_from_linear_data_offset(skb, len - tabort,
|
||||
tx_tiny_buf(cp, ring, entry), tabort);
|
||||
mapping = tx_tiny_map(cp, ring, entry, tentry);
|
||||
cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
|
||||
(nr_frags == 0));
|
||||
|
|
|
@ -1062,7 +1062,7 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
|
|||
pci_unmap_addr(ce, dma_addr),
|
||||
pci_unmap_len(ce, dma_len),
|
||||
PCI_DMA_FROMDEVICE);
|
||||
memcpy(skb->data, ce->skb->data, len);
|
||||
skb_copy_from_linear_data(ce->skb, skb->data, len);
|
||||
pci_dma_sync_single_for_device(pdev,
|
||||
pci_unmap_addr(ce, dma_addr),
|
||||
pci_unmap_len(ce, dma_len),
|
||||
|
|
|
@ -913,7 +913,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
|
|||
if (skb->len <= WR_LEN - sizeof(*cpl)) {
|
||||
q->sdesc[pidx].skb = NULL;
|
||||
if (!skb->data_len)
|
||||
memcpy(&d->flit[2], skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, &d->flit[2],
|
||||
skb->len);
|
||||
else
|
||||
skb_copy_bits(skb, 0, &d->flit[2], skb->len);
|
||||
|
||||
|
@ -1771,7 +1772,7 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
|
|||
__skb_put(skb, len);
|
||||
pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
memcpy(skb->data, sd->t.skb->data, len);
|
||||
skb_copy_from_linear_data(sd->t.skb, skb->data, len);
|
||||
pci_dma_sync_single_for_device(adap->pdev, mapping, len,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
} else if (!drop_thres)
|
||||
|
|
|
@ -741,7 +741,7 @@ static int dgrs_start_xmit(struct sk_buff *skb, struct net_device *devN)
|
|||
}
|
||||
|
||||
amt = min_t(unsigned int, len, rbdp->size - count);
|
||||
memcpy( (char *) S2H(rbdp->buf) + count, skb->data + i, amt);
|
||||
skb_copy_from_linear_data_offset(skb, i, S2H(rbdp->buf) + count, amt);
|
||||
i += amt;
|
||||
count += amt;
|
||||
len -= amt;
|
||||
|
|
|
@ -1804,8 +1804,9 @@ speedo_rx(struct net_device *dev)
|
|||
eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
|
||||
skb_put(skb, pkt_len);
|
||||
#else
|
||||
memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data,
|
||||
pkt_len);
|
||||
skb_copy_from_linear_data(sp->rx_skbuff[entry],
|
||||
skb_put(skb, pkt_len),
|
||||
pkt_len);
|
||||
#endif
|
||||
pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
|
||||
sizeof(struct RxFD) + pkt_len,
|
||||
|
|
|
@ -1306,7 +1306,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
|
|||
|
||||
if (skb_data_size >= headersize) {
|
||||
/* copy immediate data */
|
||||
memcpy(imm_data, skb->data, headersize);
|
||||
skb_copy_from_linear_data(skb, imm_data, headersize);
|
||||
swqe->immediate_data_length = headersize;
|
||||
|
||||
if (skb_data_size > headersize) {
|
||||
|
@ -1337,7 +1337,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb,
|
|||
*/
|
||||
if (skb_data_size >= SWQE2_MAX_IMM) {
|
||||
/* copy immediate data */
|
||||
memcpy(imm_data, skb->data, SWQE2_MAX_IMM);
|
||||
skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
|
||||
|
||||
swqe->immediate_data_length = SWQE2_MAX_IMM;
|
||||
|
||||
|
@ -1350,7 +1350,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb,
|
|||
swqe->descriptors++;
|
||||
}
|
||||
} else {
|
||||
memcpy(imm_data, skb->data, skb_data_size);
|
||||
skb_copy_from_linear_data(skb, imm_data, skb_data_size);
|
||||
swqe->immediate_data_length = skb_data_size;
|
||||
}
|
||||
}
|
||||
|
@ -1772,10 +1772,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
|
|||
/* copy (immediate) data */
|
||||
if (nfrags == 0) {
|
||||
/* data is in a single piece */
|
||||
memcpy(imm_data, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, imm_data, skb->len);
|
||||
} else {
|
||||
/* first copy data from the skb->data buffer ... */
|
||||
memcpy(imm_data, skb->data, skb->len - skb->data_len);
|
||||
skb_copy_from_linear_data(skb, imm_data,
|
||||
skb->len - skb->data_len);
|
||||
imm_data += skb->len - skb->data_len;
|
||||
|
||||
/* ... then copy data from the fragments */
|
||||
|
|
|
@ -551,7 +551,9 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget)
|
|||
skbn = dev_alloc_skb(pkt_len + 2);
|
||||
if (skbn != NULL) {
|
||||
skb_reserve(skbn, 2); /* align IP header */
|
||||
memcpy(skbn->data, skb->data, pkt_len);
|
||||
skb_copy_from_linear_data(skb,
|
||||
skbn->data,
|
||||
pkt_len);
|
||||
/* swap */
|
||||
skbt = skb;
|
||||
skb = skbn;
|
||||
|
|
|
@ -160,7 +160,8 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget)
|
|||
skbn = dev_alloc_skb(pkt_len + 2);
|
||||
if (skbn != NULL) {
|
||||
skb_reserve(skbn, 2); /* align IP header */
|
||||
memcpy(skbn->data, skb->data, pkt_len);
|
||||
skb_copy_from_linear_data(skb,
|
||||
skbn->data, pkt_len);
|
||||
/* swap */
|
||||
skbt = skb;
|
||||
skb = skbn;
|
||||
|
@ -293,7 +294,8 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
|
|||
skbn = dev_alloc_skb(pkt_len + 2);
|
||||
if (skbn != NULL) {
|
||||
skb_reserve(skbn, 2); /* align IP header */
|
||||
memcpy(skbn->data, skb->data, pkt_len);
|
||||
skb_copy_from_linear_data(skb,
|
||||
skbn->data, pkt_len);
|
||||
/* swap */
|
||||
skbt = skb;
|
||||
skb = skbn;
|
||||
|
|
|
@ -930,7 +930,7 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
/* Transfer data to DMA buffer */
|
||||
i = priv->tx_head;
|
||||
memcpy(priv->tx_buf[i], skb->data + 1, skb->len - 1);
|
||||
skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
|
||||
priv->tx_len[i] = skb->len - 1;
|
||||
|
||||
/* Clear interrupts while we touch our circular buffers */
|
||||
|
|
|
@ -317,7 +317,9 @@ void hdlcdrv_transmitter(struct net_device *dev, struct hdlcdrv_state *s)
|
|||
dev_kfree_skb_irq(skb);
|
||||
break;
|
||||
}
|
||||
memcpy(s->hdlctx.buffer, skb->data+1, pkt_len);
|
||||
skb_copy_from_linear_data_offset(skb, 1,
|
||||
s->hdlctx.buffer,
|
||||
pkt_len);
|
||||
dev_kfree_skb_irq(skb);
|
||||
s->hdlctx.bp = s->hdlctx.buffer;
|
||||
append_crc_ccitt(s->hdlctx.buffer, pkt_len);
|
||||
|
|
|
@ -638,7 +638,9 @@ static void yam_tx_byte(struct net_device *dev, struct yam_port *yp)
|
|||
dev_kfree_skb_any(skb);
|
||||
break;
|
||||
}
|
||||
memcpy(yp->tx_buf, skb->data + 1, yp->tx_len);
|
||||
skb_copy_from_linear_data_offset(skb, 1,
|
||||
yp->tx_buf,
|
||||
yp->tx_len);
|
||||
dev_kfree_skb_any(skb);
|
||||
yp->tx_count = 0;
|
||||
yp->tx_crcl = 0x21;
|
||||
|
|
|
@ -1443,7 +1443,7 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
if (len <= 104) {
|
||||
/* Short packet, let's copy it directly into the ring. */
|
||||
memcpy(desc->data, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, desc->data, skb->len);
|
||||
if (len < ETH_ZLEN) {
|
||||
/* Very short packet, pad with zeros at the end. */
|
||||
memset(desc->data + len, 0, ETH_ZLEN - len);
|
||||
|
|
|
@ -1472,9 +1472,8 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
self->stats.tx_bytes += skb->len;
|
||||
|
||||
memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data,
|
||||
skb->len);
|
||||
|
||||
skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
|
||||
skb->len);
|
||||
self->tx_fifo.len++;
|
||||
self->tx_fifo.free++;
|
||||
|
||||
|
|
|
@ -526,7 +526,7 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
if (aup->speed == 4000000) {
|
||||
/* FIR */
|
||||
memcpy((void *)pDB->vaddr, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
|
||||
ptxd->count_0 = skb->len & 0xff;
|
||||
ptxd->count_1 = (skb->len >> 8) & 0xff;
|
||||
|
||||
|
|
|
@ -1119,7 +1119,7 @@ dumpbufs(skb->data,skb->len,'>');
|
|||
else
|
||||
{
|
||||
len = skb->len;
|
||||
memcpy (self->tx_bufs[self->txs], skb->data, len);
|
||||
skb_copy_from_linear_data(skb, self->tx_bufs[self->txs], len);
|
||||
}
|
||||
self->ring->tx[self->txs].len = len & 0x0fff;
|
||||
|
||||
|
|
|
@ -441,7 +441,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|||
goto drop;
|
||||
}
|
||||
|
||||
memcpy(self->tx_buff + self->header_length, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, self->tx_buff + self->header_length, skb->len);
|
||||
|
||||
/* Change setting for next frame */
|
||||
if (self->capability & IUC_STIR421X) {
|
||||
|
@ -902,7 +902,7 @@ static void irda_usb_receive(struct urb *urb)
|
|||
|
||||
if(docopy) {
|
||||
/* Copy packet, so we can recycle the original */
|
||||
memcpy(newskb->data, skb->data, urb->actual_length);
|
||||
skb_copy_from_linear_data(skb, newskb->data, urb->actual_length);
|
||||
/* Deliver this new skb */
|
||||
dataskb = newskb;
|
||||
/* And hook the old skb to the URB
|
||||
|
|
|
@ -353,7 +353,7 @@ static unsigned mcs_wrap_fir_skb(const struct sk_buff *skb, __u8 *buf)
|
|||
buf[0] = len & 0xff;
|
||||
buf[1] = (len >> 8) & 0xff;
|
||||
/* copy the data into the tx buffer. */
|
||||
memcpy(buf+2, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, buf + 2, skb->len);
|
||||
/* put the fcs in the last four bytes in little endian order. */
|
||||
buf[len - 4] = fcs & 0xff;
|
||||
buf[len - 3] = (fcs >> 8) & 0xff;
|
||||
|
@ -377,7 +377,7 @@ static unsigned mcs_wrap_mir_skb(const struct sk_buff *skb, __u8 *buf)
|
|||
buf[0] = len & 0xff;
|
||||
buf[1] = (len >> 8) & 0xff;
|
||||
/* copy the data */
|
||||
memcpy(buf+2, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, buf + 2, skb->len);
|
||||
/* put the fcs in last two bytes in little endian order. */
|
||||
buf[len - 2] = fcs & 0xff;
|
||||
buf[len - 1] = (fcs >> 8) & 0xff;
|
||||
|
|
|
@ -1466,9 +1466,8 @@ static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
self->stats.tx_bytes += skb->len;
|
||||
|
||||
memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data,
|
||||
skb->len);
|
||||
|
||||
skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
|
||||
skb->len);
|
||||
self->tx_fifo.len++;
|
||||
self->tx_fifo.free++;
|
||||
|
||||
|
|
|
@ -484,7 +484,7 @@ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
unsigned long mtt = irda_get_mtt(skb);
|
||||
|
||||
si->dma_tx_buff_len = skb->len;
|
||||
memcpy(si->dma_tx_buff, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);
|
||||
|
||||
if (mtt)
|
||||
while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
|
||||
|
|
|
@ -1162,7 +1162,7 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
|
|||
self->new_speed = speed;
|
||||
}
|
||||
|
||||
memcpy(self->tx_buff.head, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, self->tx_buff.head, skb->len);
|
||||
|
||||
self->tx_buff.len = skb->len;
|
||||
self->tx_buff.data = self->tx_buff.head;
|
||||
|
|
|
@ -925,8 +925,8 @@ static int via_ircc_hard_xmit_fir(struct sk_buff *skb,
|
|||
|
||||
self->tx_fifo.tail += skb->len;
|
||||
self->stats.tx_bytes += skb->len;
|
||||
memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data,
|
||||
skb->len);
|
||||
skb_copy_from_linear_data(skb,
|
||||
self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
|
||||
self->tx_fifo.len++;
|
||||
self->tx_fifo.free++;
|
||||
//F01 if (self->tx_fifo.len == 1) {
|
||||
|
|
|
@ -993,7 +993,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
|||
goto drop;
|
||||
}
|
||||
else
|
||||
memcpy(rd->buf, skb->data, len);
|
||||
skb_copy_from_linear_data(skb, rd->buf, len);
|
||||
}
|
||||
|
||||
rd->skb = skb; /* remember skb for tx-complete stats */
|
||||
|
|
|
@ -529,7 +529,7 @@ int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
/* Decide if we should use PIO or DMA transfer */
|
||||
if (self->io.speed > PIO_MAX_SPEED) {
|
||||
self->tx_buff.data = self->tx_buff.head;
|
||||
memcpy(self->tx_buff.data, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
|
||||
self->tx_buff.len = skb->len;
|
||||
|
||||
mtt = irda_get_mtt(skb);
|
||||
|
|
|
@ -988,7 +988,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
if (lance_debug > 5)
|
||||
printk("%s: bouncing a high-memory packet (%#x).\n",
|
||||
dev->name, (u32)isa_virt_to_bus(skb->data));
|
||||
memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
|
||||
lp->tx_ring[entry].base =
|
||||
((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
|
||||
dev_kfree_skb(skb);
|
||||
|
|
|
@ -420,8 +420,7 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
|
|||
mp->stats.tx_bytes += skb->len;
|
||||
|
||||
/* We need to copy into our xmit buffer to take care of alignment and caching issues */
|
||||
|
||||
memcpy((void *) mp->tx_ring, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);
|
||||
|
||||
/* load the Tx DMA and fire it off */
|
||||
|
||||
|
|
|
@ -608,7 +608,7 @@ static void meth_tx_short_prepare(struct meth_private *priv,
|
|||
|
||||
desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16);
|
||||
/* maybe I should set whole thing to 0 first... */
|
||||
memcpy(desc->data.dt + (120 - len), skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len);
|
||||
if (skb->len < len)
|
||||
memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len);
|
||||
}
|
||||
|
@ -626,8 +626,8 @@ static void meth_tx_1page_prepare(struct meth_private *priv,
|
|||
|
||||
/* unaligned part */
|
||||
if (unaligned_len) {
|
||||
memcpy(desc->data.dt + (120 - unaligned_len),
|
||||
skb->data, unaligned_len);
|
||||
skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
|
||||
unaligned_len);
|
||||
desc->header.raw |= (128 - unaligned_len) << 16;
|
||||
}
|
||||
|
||||
|
@ -652,8 +652,8 @@ static void meth_tx_2page_prepare(struct meth_private *priv,
|
|||
desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1);
|
||||
/* unaligned part */
|
||||
if (unaligned_len){
|
||||
memcpy(desc->data.dt + (120 - unaligned_len),
|
||||
skb->data, unaligned_len);
|
||||
skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
|
||||
unaligned_len);
|
||||
desc->header.raw |= (128 - unaligned_len) << 16;
|
||||
}
|
||||
|
||||
|
|
|
@ -502,7 +502,7 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev)
|
|||
copy_skb->dev = dev;
|
||||
DRX(("resv_and_put "));
|
||||
skb_put(copy_skb, len);
|
||||
memcpy(copy_skb->data, skb->data, len);
|
||||
skb_copy_from_linear_data(skb, copy_skb->data, len);
|
||||
|
||||
/* Reuse original ring buffer. */
|
||||
DRX(("reuse "));
|
||||
|
|
|
@ -920,8 +920,10 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
|||
/* copy the next 64 bytes - should be enough except
|
||||
* for pathological case
|
||||
*/
|
||||
memcpy((void *)hwdesc, (void *)(skb->data) +
|
||||
first_hdr_len, hdr_len - first_hdr_len);
|
||||
skb_copy_from_linear_data_offset(skb, first_hdr_len,
|
||||
hwdesc,
|
||||
(hdr_len -
|
||||
first_hdr_len));
|
||||
producer = get_next_index(producer, max_tx_desc_count);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1182,7 +1182,7 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
|
|||
else
|
||||
#endif
|
||||
{
|
||||
memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len);
|
||||
skb_copy_from_linear_data(skb, p->xmit_cbuffs[p->xmit_count], skb->len);
|
||||
len = skb->len;
|
||||
if (len < ETH_ZLEN) {
|
||||
len = ETH_ZLEN;
|
||||
|
|
|
@ -1176,8 +1176,9 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
|
|||
if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
|
||||
#endif
|
||||
|
||||
memcpy((char *) p->tmdbounce[p->tmdbouncenum] ,(char *)skb->data,
|
||||
(skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len);
|
||||
skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
|
||||
skb->len > T_BUF_SIZE ? T_BUF_SIZE :
|
||||
skb->len);
|
||||
if (len > skb->len)
|
||||
memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
|
||||
dev_kfree_skb (skb);
|
||||
|
|
|
@ -1344,7 +1344,7 @@ static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
tp->tx_info[entry].skb = skb;
|
||||
/* tp->tx_info[entry].mapping = 0; */
|
||||
memcpy (tp->tx_buf[entry], skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, tp->tx_buf[entry], skb->len);
|
||||
|
||||
/* Note: the chip doesn't have auto-pad! */
|
||||
NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)),
|
||||
|
|
|
@ -1136,7 +1136,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
ei_block_output(dev, length, skb->data, output_page);
|
||||
else {
|
||||
memset(packet, 0, ETH_ZLEN);
|
||||
memcpy(packet, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, packet, skb->len);
|
||||
ei_block_output(dev, length, packet, output_page);
|
||||
}
|
||||
|
||||
|
|
|
@ -594,7 +594,8 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
|
|||
return NULL;
|
||||
}
|
||||
skb_reserve(npkt,2);
|
||||
memcpy(skb_put(npkt,skb->len), skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb,
|
||||
skb_put(npkt, skb->len), skb->len);
|
||||
kfree_skb(skb);
|
||||
skb = npkt;
|
||||
}
|
||||
|
|
|
@ -869,7 +869,8 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
|
|||
goto abort;
|
||||
|
||||
skb_reserve(skb2, dev->hard_header_len + sizeof(struct pppoe_hdr));
|
||||
memcpy(skb_put(skb2, skb->len), skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, skb_put(skb2, skb->len),
|
||||
skb->len);
|
||||
} else {
|
||||
/* Make a clone so as to not disturb the original skb,
|
||||
* give dev_queue_xmit something it can free.
|
||||
|
|
|
@ -1927,7 +1927,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
|
|||
* Copy the ethhdr from first buffer to second. This
|
||||
* is necessary for 3022 IP completions.
|
||||
*/
|
||||
memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
|
||||
skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
|
||||
skb_push(skb2, size), size);
|
||||
} else {
|
||||
u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
|
||||
if (checksum &
|
||||
|
|
|
@ -1451,7 +1451,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
}
|
||||
skb_reserve(new_skb, 8);
|
||||
skb_put(new_skb, len);
|
||||
memcpy(new_skb->data, skb->data, len);
|
||||
skb_copy_from_linear_data(skb, new_skb->data, len);
|
||||
dev_kfree_skb(skb);
|
||||
skb = new_skb;
|
||||
}
|
||||
|
|
|
@ -534,7 +534,7 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
* entry and the HPC got to the end of the chain before we
|
||||
* added this new entry and restarted it.
|
||||
*/
|
||||
memcpy((char *)(long)td->buf_vaddr, skb->data, skblen);
|
||||
skb_copy_from_linear_data(skb, (char *)(long)td->buf_vaddr, skblen);
|
||||
if (len != skblen)
|
||||
memset((char *)(long)td->buf_vaddr + skb->len, 0, len-skblen);
|
||||
td->tdma.cntinfo = (len & HPCDMA_BCNT) |
|
||||
|
|
|
@ -2950,7 +2950,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
|
|||
pci_dma_sync_single_for_cpu(skge->hw->pdev,
|
||||
pci_unmap_addr(e, mapaddr),
|
||||
len, PCI_DMA_FROMDEVICE);
|
||||
memcpy(skb->data, e->skb->data, len);
|
||||
skb_copy_from_linear_data(e->skb, skb->data, len);
|
||||
pci_dma_sync_single_for_device(skge->hw->pdev,
|
||||
pci_unmap_addr(e, mapaddr),
|
||||
len, PCI_DMA_FROMDEVICE);
|
||||
|
|
|
@ -1971,7 +1971,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
|
|||
skb_reserve(skb, 2);
|
||||
pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
|
||||
length, PCI_DMA_FROMDEVICE);
|
||||
memcpy(skb->data, re->skb->data, length);
|
||||
skb_copy_from_linear_data(re->skb, skb->data, length);
|
||||
skb->ip_summed = re->skb->ip_summed;
|
||||
skb->csum = re->skb->csum;
|
||||
pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
|
||||
|
|
|
@ -1026,7 +1026,7 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
|
|||
memset((char *)p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
|
||||
len = ETH_ZLEN;
|
||||
}
|
||||
memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len);
|
||||
skb_copy_from_linear_data(skb, p->xmit_cbuffs[p->xmit_count], skb->len);
|
||||
|
||||
#if (NUM_XMIT_BUFFS == 1)
|
||||
# ifdef NO_NOPCOMMANDS
|
||||
|
|
|
@ -629,7 +629,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
|
|||
head->length = (-len) | 0xf000;
|
||||
head->misc = 0;
|
||||
|
||||
memcpy( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
|
||||
skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len);
|
||||
if (len != skb->len)
|
||||
memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len);
|
||||
|
||||
|
|
|
@ -848,7 +848,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
|
|||
skb_reserve(copy_skb, 2);
|
||||
skb_put(copy_skb, len);
|
||||
pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
|
||||
memcpy(copy_skb->data, skb->data, len);
|
||||
skb_copy_from_linear_data(skb, copy_skb->data, len);
|
||||
pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
|
||||
|
||||
/* We'll reuse the original ring buffer. */
|
||||
|
|
|
@ -2061,7 +2061,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
|
|||
skb_reserve(copy_skb, 2);
|
||||
skb_put(copy_skb, len);
|
||||
hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE);
|
||||
memcpy(copy_skb->data, skb->data, len);
|
||||
skb_copy_from_linear_data(skb, copy_skb->data, len);
|
||||
hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE);
|
||||
|
||||
/* Reuse original ring buffer. */
|
||||
|
|
|
@ -1143,7 +1143,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
struct lance_init_block *ib = lp->init_block_mem;
|
||||
ib->btx_ring [entry].length = (-len) | 0xf000;
|
||||
ib->btx_ring [entry].misc = 0;
|
||||
memcpy((char *)&ib->tx_buf [entry][0], skb->data, skblen);
|
||||
skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen);
|
||||
if (len != skblen)
|
||||
memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen);
|
||||
ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
|
||||
|
|
|
@ -592,7 +592,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
/* Avoid a race... */
|
||||
qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
|
||||
|
||||
memcpy(txbuf, skb->data, len);
|
||||
skb_copy_from_linear_data(skb, txbuf, len);
|
||||
|
||||
qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
|
||||
qep->qe_block->qe_txd[entry].tx_flags =
|
||||
|
|
|
@ -3350,7 +3350,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
|
|||
skb_reserve(copy_skb, 2);
|
||||
skb_put(copy_skb, len);
|
||||
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
|
||||
memcpy(copy_skb->data, skb->data, len);
|
||||
skb_copy_from_linear_data(skb, copy_skb->data, len);
|
||||
pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
|
||||
|
||||
/* We'll reuse the original ring buffer. */
|
||||
|
|
|
@ -1112,7 +1112,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
|
|||
|
||||
if ( bbuf ) {
|
||||
tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
|
||||
memcpy( tail_buffer, skb->data, skb->len );
|
||||
skb_copy_from_linear_data(skb, tail_buffer, skb->len);
|
||||
} else {
|
||||
tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE);
|
||||
TLan_StoreSKB(tail_list, skb);
|
||||
|
|
|
@ -937,14 +937,17 @@ static void xl_rx(struct net_device *dev)
|
|||
copy_len = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen & 0x7FFF ;
|
||||
frame_length -= copy_len ;
|
||||
pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
|
||||
memcpy(skb_put(skb,copy_len), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, copy_len) ;
|
||||
skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
|
||||
skb_put(skb, copy_len),
|
||||
copy_len);
|
||||
pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
|
||||
adv_rx_ring(dev) ;
|
||||
}
|
||||
|
||||
/* Now we have found the last fragment */
|
||||
pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
|
||||
memcpy(skb_put(skb,copy_len), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, frame_length) ;
|
||||
skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
|
||||
skb_put(skb,copy_len), frame_length);
|
||||
/* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */
|
||||
pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
|
||||
adv_rx_ring(dev) ;
|
||||
|
|
|
@ -845,7 +845,9 @@ static void olympic_rx(struct net_device *dev)
|
|||
pci_dma_sync_single_for_cpu(olympic_priv->pdev,
|
||||
le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
|
||||
olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
|
||||
memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ;
|
||||
skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
|
||||
skb_put(skb,length - 4),
|
||||
length - 4);
|
||||
pci_dma_sync_single_for_device(olympic_priv->pdev,
|
||||
le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
|
||||
olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
|
||||
|
@ -862,7 +864,9 @@ static void olympic_rx(struct net_device *dev)
|
|||
olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
|
||||
rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
|
||||
cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
|
||||
memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ;
|
||||
skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
|
||||
skb_put(skb, cpy_length),
|
||||
cpy_length);
|
||||
pci_dma_sync_single_for_device(olympic_priv->pdev,
|
||||
le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
|
||||
olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
|
||||
|
|
|
@ -644,7 +644,7 @@ static int tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device
|
|||
dmabuf = 0;
|
||||
i = tp->TplFree->TPLIndex;
|
||||
buf = tp->LocalTxBuffers[i];
|
||||
memcpy(buf, skb->data, length);
|
||||
skb_copy_from_linear_data(skb, buf, length);
|
||||
newbuf = ((char *)buf - (char *)tp) + tp->dmabuffer;
|
||||
}
|
||||
else {
|
||||
|
|
|
@ -449,8 +449,8 @@ static void de_rx (struct de_private *de)
|
|||
} else {
|
||||
pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
|
||||
skb_reserve(copy_skb, RX_OFFSET);
|
||||
memcpy(skb_put(copy_skb, len), skb->data, len);
|
||||
|
||||
skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
|
||||
len);
|
||||
pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
|
||||
|
||||
/* We'll reuse the original ring buffer. */
|
||||
|
|
|
@ -682,7 +682,7 @@ static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
|
|||
|
||||
/* transmit this packet */
|
||||
txptr = db->tx_insert_ptr;
|
||||
memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
|
||||
txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
|
||||
|
||||
/* Point to next transmit free descriptor */
|
||||
|
@ -989,7 +989,9 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
|
|||
skb = newskb;
|
||||
/* size less than COPY_SIZE, allocate a rxlen SKB */
|
||||
skb_reserve(skb, 2); /* 16byte align */
|
||||
memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen);
|
||||
skb_copy_from_linear_data(rxptr->rx_skb_ptr,
|
||||
skb_put(skb, rxlen),
|
||||
rxlen);
|
||||
dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
|
||||
} else
|
||||
skb_put(skb, rxlen);
|
||||
|
|
|
@ -583,7 +583,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
/* transmit this packet */
|
||||
txptr = db->tx_insert_ptr;
|
||||
memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
|
||||
txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
|
||||
|
||||
/* Point to next transmit free descriptor */
|
||||
|
|
|
@ -411,9 +411,9 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
sometimes sends more than you ask it to. */
|
||||
|
||||
memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536);
|
||||
memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len);
|
||||
|
||||
|
||||
skb_copy_from_linear_data(skb,
|
||||
&(card->tx_buffer[bufferoffsets[desc] / 4]),
|
||||
skb->len);
|
||||
/* FIXME: The specification tells us that the length we send HAS to be a multiple of
|
||||
4 bytes. */
|
||||
|
||||
|
|
|
@ -915,7 +915,9 @@ xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
tp->tx_skbuff[entry] = skb;
|
||||
if (tp->chip_id == X3201_3) {
|
||||
memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len);
|
||||
skb_copy_from_linear_data(skb,
|
||||
tp->tx_aligned_skbuff[entry]->data,
|
||||
skb->len);
|
||||
tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
|
||||
} else
|
||||
tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
|
||||
|
|
|
@ -386,8 +386,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
|
|||
* - we are multicast promiscous.
|
||||
* - we belong to the multicast group.
|
||||
*/
|
||||
memcpy(addr, skb->data,
|
||||
min_t(size_t, sizeof addr, skb->len));
|
||||
skb_copy_from_linear_data(skb, addr, min_t(size_t, sizeof addr,
|
||||
skb->len));
|
||||
bit_nr = ether_crc(sizeof addr, addr) >> 26;
|
||||
if ((tun->if_flags & IFF_PROMISC) ||
|
||||
memcmp(addr, tun->dev_addr, sizeof addr) == 0 ||
|
||||
|
|
|
@ -1339,7 +1339,8 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
|
|||
if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
|
||||
skb_reserve(new_skb, 2);
|
||||
|
||||
memcpy(new_skb->data, rx_skb[0]->data, pkt_size);
|
||||
skb_copy_from_linear_data(rx_skb[0], new_skb->data,
|
||||
pkt_size);
|
||||
*rx_skb = new_skb;
|
||||
ret = 0;
|
||||
}
|
||||
|
@ -1927,7 +1928,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
if (pktlen < ETH_ZLEN) {
|
||||
/* Cannot occur until ZC support */
|
||||
pktlen = ETH_ZLEN;
|
||||
memcpy(tdinfo->buf, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
|
||||
memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
|
||||
tdinfo->skb = skb;
|
||||
tdinfo->skb_dma[0] = tdinfo->buf_dma;
|
||||
|
@ -1943,7 +1944,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
int nfrags = skb_shinfo(skb)->nr_frags;
|
||||
tdinfo->skb = skb;
|
||||
if (nfrags > 6) {
|
||||
memcpy(tdinfo->buf, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
|
||||
tdinfo->skb_dma[0] = tdinfo->buf_dma;
|
||||
td_ptr->tdesc0.pktsize =
|
||||
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
|
||||
|
|
|
@ -1702,7 +1702,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
|
|||
if(!nsb) {
|
||||
goto give_it_anyways;
|
||||
}
|
||||
memcpy(skb_put(nsb, len), skb->data, len);
|
||||
skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
|
||||
|
||||
nsb->protocol = lmc_proto_type(sc, skb);
|
||||
skb_reset_mac_header(nsb);
|
||||
|
|
|
@ -1765,7 +1765,7 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx)
|
|||
skb->data[7] = ']';
|
||||
skb->data[8] = ':';
|
||||
skb->data[9] = ' ';
|
||||
memcpy(&skb->data[10], skb_main->data, skb_main->len);
|
||||
skb_copy_from_linear_data(skb_main, &skb->data[10], skb_main->len);
|
||||
|
||||
netif_rx(skb);
|
||||
}
|
||||
|
|
|
@ -1782,7 +1782,7 @@ int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
|
|||
*/
|
||||
c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
|
||||
c->tx_dma_used^=1; /* Flip temp buffer */
|
||||
memcpy(c->tx_next_ptr, skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
|
||||
}
|
||||
else
|
||||
c->tx_next_ptr=skb->data;
|
||||
|
|
|
@ -827,14 +827,14 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
|
|||
if (priv->wep_is_on)
|
||||
frame_ctl |= IEEE80211_FCTL_PROTECTED;
|
||||
if (priv->operating_mode == IW_MODE_ADHOC) {
|
||||
memcpy(&header.addr1, skb->data, 6);
|
||||
skb_copy_from_linear_data(skb, &header.addr1, 6);
|
||||
memcpy(&header.addr2, dev->dev_addr, 6);
|
||||
memcpy(&header.addr3, priv->BSSID, 6);
|
||||
} else {
|
||||
frame_ctl |= IEEE80211_FCTL_TODS;
|
||||
memcpy(&header.addr1, priv->CurrentBSSID, 6);
|
||||
memcpy(&header.addr2, dev->dev_addr, 6);
|
||||
memcpy(&header.addr3, skb->data, 6);
|
||||
skb_copy_from_linear_data(skb, &header.addr3, 6);
|
||||
}
|
||||
|
||||
if (priv->use_wpa)
|
||||
|
|
|
@ -998,7 +998,8 @@ static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
|
|||
assert(0);
|
||||
return;
|
||||
}
|
||||
memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
|
||||
skb_copy_from_linear_data(skb, skb_put(bounce_skb, skb->len),
|
||||
skb->len);
|
||||
dev_kfree_skb_any(skb);
|
||||
skb = bounce_skb;
|
||||
}
|
||||
|
|
|
@ -933,12 +933,14 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
|
|||
if (frag == 0) {
|
||||
/* copy first fragment (including full headers) into
|
||||
* beginning of the fragment cache skb */
|
||||
memcpy(skb_put(frag_skb, flen), skb->data, flen);
|
||||
skb_copy_from_linear_data(skb, skb_put(frag_skb, flen),
|
||||
flen);
|
||||
} else {
|
||||
/* append frame payload to the end of the fragment
|
||||
* cache skb */
|
||||
memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
|
||||
flen);
|
||||
skb_copy_from_linear_data_offset(skb, hdrlen,
|
||||
skb_put(frag_skb,
|
||||
flen), flen);
|
||||
}
|
||||
dev_kfree_skb(skb);
|
||||
skb = NULL;
|
||||
|
@ -1044,8 +1046,9 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
|
|||
skb->len >= ETH_HLEN + ETH_ALEN) {
|
||||
/* Non-standard frame: get addr4 from its bogus location after
|
||||
* the payload */
|
||||
memcpy(skb->data + ETH_ALEN,
|
||||
skb->data + skb->len - ETH_ALEN, ETH_ALEN);
|
||||
skb_copy_from_linear_data_offset(skb, skb->len - ETH_ALEN,
|
||||
skb->data + ETH_ALEN,
|
||||
ETH_ALEN);
|
||||
skb_trim(skb, skb->len - ETH_ALEN);
|
||||
}
|
||||
|
||||
|
|
|
@ -146,7 +146,8 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
|
||||
/* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
|
||||
* Addr4 = SA */
|
||||
memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
|
||||
skb_copy_from_linear_data_offset(skb, ETH_ALEN,
|
||||
&hdr.addr4, ETH_ALEN);
|
||||
hdr_len += ETH_ALEN;
|
||||
} else {
|
||||
/* bogus 4-addr format to workaround Prism2 station
|
||||
|
@ -159,7 +160,8 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
/* SA from skb->data + ETH_ALEN will be added after
|
||||
* frame payload; use hdr.addr4 as a temporary buffer
|
||||
*/
|
||||
memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
|
||||
skb_copy_from_linear_data_offset(skb, ETH_ALEN,
|
||||
&hdr.addr4, ETH_ALEN);
|
||||
need_tailroom += ETH_ALEN;
|
||||
}
|
||||
|
||||
|
@ -174,24 +176,27 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
else
|
||||
memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
|
||||
memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
|
||||
memcpy(&hdr.addr3, skb->data, ETH_ALEN);
|
||||
skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
|
||||
} else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
|
||||
fc |= IEEE80211_FCTL_FROMDS;
|
||||
/* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
|
||||
memcpy(&hdr.addr1, skb->data, ETH_ALEN);
|
||||
skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
|
||||
memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
|
||||
memcpy(&hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
|
||||
skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3,
|
||||
ETH_ALEN);
|
||||
} else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
|
||||
fc |= IEEE80211_FCTL_TODS;
|
||||
/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
|
||||
memcpy(&hdr.addr1, to_assoc_ap ?
|
||||
local->assoc_ap_addr : local->bssid, ETH_ALEN);
|
||||
memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
|
||||
memcpy(&hdr.addr3, skb->data, ETH_ALEN);
|
||||
skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
|
||||
ETH_ALEN);
|
||||
skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
|
||||
} else if (local->iw_mode == IW_MODE_ADHOC) {
|
||||
/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
|
||||
memcpy(&hdr.addr1, skb->data, ETH_ALEN);
|
||||
memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
|
||||
skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
|
||||
skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
|
||||
ETH_ALEN);
|
||||
memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
|
||||
}
|
||||
|
||||
|
|
|
@ -1277,8 +1277,8 @@ static char * ap_auth_make_challenge(struct ap_data *ap)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
memcpy(tmpbuf, skb->data + ap->crypt->extra_mpdu_prefix_len,
|
||||
WLAN_AUTH_CHALLENGE_LEN);
|
||||
skb_copy_from_linear_data_offset(skb, ap->crypt->extra_mpdu_prefix_len,
|
||||
tmpbuf, WLAN_AUTH_CHALLENGE_LEN);
|
||||
dev_kfree_skb(skb);
|
||||
|
||||
return tmpbuf;
|
||||
|
|
|
@ -1838,13 +1838,14 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
/* skb->data starts with txdesc->frame_control */
|
||||
hdr_len = 24;
|
||||
memcpy(&txdesc.frame_control, skb->data, hdr_len);
|
||||
skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len);
|
||||
fc = le16_to_cpu(txdesc.frame_control);
|
||||
if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
|
||||
(fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS) &&
|
||||
skb->len >= 30) {
|
||||
/* Addr4 */
|
||||
memcpy(txdesc.addr4, skb->data + hdr_len, ETH_ALEN);
|
||||
skb_copy_from_linear_data_offset(skb, hdr_len, txdesc.addr4,
|
||||
ETH_ALEN);
|
||||
hdr_len += ETH_ALEN;
|
||||
}
|
||||
|
||||
|
|
|
@ -2416,8 +2416,9 @@ static void isr_rx(struct ipw2100_priv *priv, int i,
|
|||
#ifdef IPW2100_RX_DEBUG
|
||||
/* Make a copy of the frame so we can dump it to the logs if
|
||||
* ieee80211_rx fails */
|
||||
memcpy(packet_data, packet->skb->data,
|
||||
min_t(u32, status->frame_size, IPW_RX_NIC_BUFFER_LENGTH));
|
||||
skb_copy_from_linear_data(packet->skb, packet_data,
|
||||
min_t(u32, status->frame_size,
|
||||
IPW_RX_NIC_BUFFER_LENGTH));
|
||||
#endif
|
||||
|
||||
if (!ieee80211_rx(priv->ieee, packet->skb, stats)) {
|
||||
|
|
|
@ -10355,7 +10355,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
|
|||
|
||||
rt_hdr->it_len = dst->len;
|
||||
|
||||
memcpy(skb_put(dst, len), src->data, len);
|
||||
skb_copy_from_linear_data(src, skb_put(dst, len), len);
|
||||
|
||||
if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
|
||||
dev_kfree_skb_any(dst);
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue