net: neterion: vxge: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below.
No GFP_ flag needs to be corrected.

It has been compile tested.

@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL

@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE

@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE

@@
@@
- PCI_DMA_NONE
+ DMA_NONE

@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)

Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fb059b26bc
commit 8331bbe9ea
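Before the diff itself, here is a minimal sketch of what the conversion looks like at a single call site; the identifiers pdev, buf, and len are illustrative placeholders, not code from this driver:

/* Before: compat wrapper from include/linux/pci-dma-compat.h,
 * keyed on the struct pci_dev.
 */
dma_addr_t addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
pci_unmap_single(pdev, addr, len, PCI_DMA_TODEVICE);

/* After: generic DMA API, keyed on the struct device embedded in the
 * PCI device.  Same mapping semantics, different spelling.
 */
dma_addr_t addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);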
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -1102,10 +1102,10 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
 	hldev = blockpool->hldev;
 
 	list_for_each_safe(p, n, &blockpool->free_block_list) {
-		pci_unmap_single(hldev->pdev,
+		dma_unmap_single(&hldev->pdev->dev,
 			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
 			((struct __vxge_hw_blockpool_entry *)p)->length,
-			PCI_DMA_BIDIRECTIONAL);
+			DMA_BIDIRECTIONAL);
 
 		vxge_os_dma_free(hldev->pdev,
 			((struct __vxge_hw_blockpool_entry *)p)->memblock,
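The recurring &hldev->pdev->dev pattern above works because struct pci_dev embeds the generic struct device that the dma_* functions operate on; no new reference is taken. A hedged illustration (the helper name is hypothetical, not part of the kernel API):

/* Hypothetical helper, for illustration only: the dma_* calls simply
 * want the generic device embedded in the PCI device.
 */
static inline struct device *vxge_dma_dev(struct pci_dev *pdev)
{
	return &pdev->dev;
}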
@@ -1178,10 +1178,10 @@ __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
 			goto blockpool_create_exit;
 		}
 
-		dma_addr = pci_map_single(hldev->pdev, memblock,
-				VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
-		if (unlikely(pci_dma_mapping_error(hldev->pdev,
-				dma_addr))) {
+		dma_addr = dma_map_single(&hldev->pdev->dev, memblock,
+					  VXGE_HW_BLOCK_SIZE,
+					  DMA_BIDIRECTIONAL);
+		if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) {
 			vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
 			__vxge_hw_blockpool_destroy(blockpool);
 			status = VXGE_HW_ERR_OUT_OF_MEMORY;
@@ -2264,10 +2264,10 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
 		goto exit;
 	}
 
-	dma_addr = pci_map_single(devh->pdev, block_addr, length,
-				PCI_DMA_BIDIRECTIONAL);
+	dma_addr = dma_map_single(&devh->pdev->dev, block_addr, length,
+				  DMA_BIDIRECTIONAL);
 
-	if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
+	if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_addr))) {
 		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
 		blockpool->req_out--;
 		goto exit;
@@ -2359,11 +2359,10 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
 		if (!memblock)
 			goto exit;
 
-		dma_object->addr = pci_map_single(devh->pdev, memblock, size,
-					PCI_DMA_BIDIRECTIONAL);
+		dma_object->addr = dma_map_single(&devh->pdev->dev, memblock,
+						  size, DMA_BIDIRECTIONAL);
 
-		if (unlikely(pci_dma_mapping_error(devh->pdev,
-				dma_object->addr))) {
+		if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_object->addr))) {
 			vxge_os_dma_free(devh->pdev, memblock,
 				&dma_object->acc_handle);
 			memblock = NULL;
@@ -2410,11 +2409,10 @@ __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
 		if (blockpool->pool_size < blockpool->pool_max)
 			break;
 
-		pci_unmap_single(
-			(blockpool->hldev)->pdev,
-			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
-			((struct __vxge_hw_blockpool_entry *)p)->length,
-			PCI_DMA_BIDIRECTIONAL);
+		dma_unmap_single(&(blockpool->hldev)->pdev->dev,
+				 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+				 ((struct __vxge_hw_blockpool_entry *)p)->length,
+				 DMA_BIDIRECTIONAL);
 
 		vxge_os_dma_free(
 			(blockpool->hldev)->pdev,
@@ -2445,8 +2443,8 @@ static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
 	blockpool = &devh->block_pool;
 
 	if (size != blockpool->block_size) {
-		pci_unmap_single(devh->pdev, dma_object->addr, size,
-			PCI_DMA_BIDIRECTIONAL);
+		dma_unmap_single(&devh->pdev->dev, dma_object->addr, size,
+				 DMA_BIDIRECTIONAL);
 		vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
 	} else {
 
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -241,10 +241,10 @@ static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
 	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
 
 	rx_priv->skb_data = rx_priv->skb->data;
-	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
-				rx_priv->data_size, PCI_DMA_FROMDEVICE);
+	dma_addr = dma_map_single(&ring->pdev->dev, rx_priv->skb_data,
+				  rx_priv->data_size, DMA_FROM_DEVICE);
 
-	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
+	if (unlikely(dma_mapping_error(&ring->pdev->dev, dma_addr))) {
 		ring->stats.pci_map_fail++;
 		return -EIO;
 	}
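As in the hunk above, every streaming mapping is immediately validated: the returned handle is opaque and must be checked with dma_mapping_error() rather than compared against zero. A minimal sketch of the idiom, with illustrative names (example_map_rx and out are not from the driver):

/* Illustrative sketch: map an RX buffer and fail cleanly if the
 * IOMMU/swiotlb cannot provide a mapping.
 */
static int example_map_rx(struct device *dev, void *data, size_t len,
			  dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, data, len, DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(dev, addr)))
		return -EIO;	/* nothing to unmap on this path */

	*out = addr;
	return 0;
}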
@@ -323,8 +323,8 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
 static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
 				    struct vxge_rx_priv *rx_priv)
 {
-	pci_dma_sync_single_for_device(ring->pdev,
-		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_device(&ring->pdev->dev, rx_priv->data_dma,
+				   rx_priv->data_size, DMA_FROM_DEVICE);
 
 	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
 	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
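The sync call above follows the streaming-DMA ownership rules: a DMA_FROM_DEVICE buffer belongs to the device while posted, so the CPU must sync _for_cpu before reading it and _for_device before reposting it to hardware. A sketch under those assumptions (the function and its names are illustrative):

/* Illustrative sketch: recycle a long-lived, already-mapped RX buffer. */
static void example_recycle_rx(struct device *dev, dma_addr_t dma,
			       size_t len)
{
	/* Transfer ownership to the CPU before reading packet data. */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

	/* ... CPU inspects/copies the received bytes here ... */

	/* Hand the buffer back to the device before reposting the RXD. */
	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}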
@@ -425,8 +425,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 			if (!vxge_rx_map(dtr, ring)) {
 				skb_put(skb, pkt_length);
 
-				pci_unmap_single(ring->pdev, data_dma,
-					data_size, PCI_DMA_FROMDEVICE);
+				dma_unmap_single(&ring->pdev->dev,
+						 data_dma, data_size,
+						 DMA_FROM_DEVICE);
 
 				vxge_hw_ring_rxd_pre_post(ringh, dtr);
 				vxge_post(&dtr_cnt, &first_dtr, dtr,
@@ -458,9 +459,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 				skb_reserve(skb_up,
 				    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
 
-				pci_dma_sync_single_for_cpu(ring->pdev,
-					data_dma, data_size,
-					PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_cpu(&ring->pdev->dev,
+							data_dma, data_size,
+							DMA_FROM_DEVICE);
 
 				vxge_debug_mem(VXGE_TRACE,
 					"%s: %s:%d skb_up = %p",
@@ -585,13 +586,13 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
 		}
 
 		/* for unfragmented skb */
-		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
-				skb_headlen(skb), PCI_DMA_TODEVICE);
+		dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
+				 skb_headlen(skb), DMA_TO_DEVICE);
 
 		for (j = 0; j < frg_cnt; j++) {
-			pci_unmap_page(fifo->pdev,
-					txd_priv->dma_buffers[i++],
-					skb_frag_size(frag), PCI_DMA_TODEVICE);
+			dma_unmap_page(&fifo->pdev->dev,
+				       txd_priv->dma_buffers[i++],
+				       skb_frag_size(frag), DMA_TO_DEVICE);
 			frag += 1;
 		}
 
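TX completion must undo exactly what the transmit path mapped: the linear head via dma_unmap_single() and each page fragment via dma_unmap_page(). The generic shape of that loop, sketched with an assumed per-descriptor address array (example_unmap_tx and dma_buffers are illustrative):

/* Illustrative sketch: release the DMA mappings recorded for one skb. */
static void example_unmap_tx(struct device *dev, struct sk_buff *skb,
			     dma_addr_t *dma_buffers)
{
	int i = 0, j;

	/* The linear part was mapped with dma_map_single(). */
	dma_unmap_single(dev, dma_buffers[i++], skb_headlen(skb),
			 DMA_TO_DEVICE);

	/* Each paged fragment was mapped page-wise. */
	for (j = 0; j < skb_shinfo(skb)->nr_frags; j++)
		dma_unmap_page(dev, dma_buffers[i++],
			       skb_frag_size(&skb_shinfo(skb)->frags[j]),
			       DMA_TO_DEVICE);
}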
@@ -897,10 +898,10 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	first_frg_len = skb_headlen(skb);
 
-	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
-				PCI_DMA_TODEVICE);
+	dma_pointer = dma_map_single(&fifo->pdev->dev, skb->data,
+				     first_frg_len, DMA_TO_DEVICE);
 
-	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
+	if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) {
 		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
 		fifo->stats.pci_map_fail++;
 		goto _exit0;
@@ -977,12 +978,12 @@ _exit1:
 	j = 0;
 	frag = &skb_shinfo(skb)->frags[0];
 
-	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
-			skb_headlen(skb), PCI_DMA_TODEVICE);
+	dma_unmap_single(&fifo->pdev->dev, txdl_priv->dma_buffers[j++],
+			 skb_headlen(skb), DMA_TO_DEVICE);
 
 	for (; j < i; j++) {
-		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
-			skb_frag_size(frag), PCI_DMA_TODEVICE);
+		dma_unmap_page(&fifo->pdev->dev, txdl_priv->dma_buffers[j],
+			       skb_frag_size(frag), DMA_TO_DEVICE);
 		frag += 1;
 	}
 
@@ -1012,8 +1013,8 @@ vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
 	if (state != VXGE_HW_RXD_STATE_POSTED)
 		return;
 
-	pci_unmap_single(ring->pdev, rx_priv->data_dma,
-		rx_priv->data_size, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&ring->pdev->dev, rx_priv->data_dma,
+			 rx_priv->data_size, DMA_FROM_DEVICE);
 
 	dev_kfree_skb(rx_priv->skb);
 	rx_priv->skb_data = NULL;
@@ -1048,12 +1049,12 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
 	frag = &skb_shinfo(skb)->frags[0];
 
 	/* for unfragmented skb */
-	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
-		skb_headlen(skb), PCI_DMA_TODEVICE);
+	dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
+			 skb_headlen(skb), DMA_TO_DEVICE);
 
 	for (j = 0; j < frg_cnt; j++) {
-		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
-			skb_frag_size(frag), PCI_DMA_TODEVICE);
+		dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
+			       skb_frag_size(frag), DMA_TO_DEVICE);
 		frag += 1;
 	}
 
@@ -4387,21 +4388,20 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 		goto _exit0;
 	}
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		vxge_debug_ll_config(VXGE_TRACE,
 			"%s : using 64bit DMA", __func__);
 
 		high_dma = 1;
 
-		if (pci_set_consistent_dma_mask(pdev,
-						DMA_BIT_MASK(64))) {
+		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 			vxge_debug_init(VXGE_ERR,
 				"%s : unable to obtain 64bit DMA for "
 				"consistent allocations", __func__);
 			ret = -ENOMEM;
 			goto _exit1;
 		}
-	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 		vxge_debug_ll_config(VXGE_TRACE,
 			"%s : using 32bit DMA", __func__);
 	} else {
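A possible follow-up, not part of this commit: since the streaming and coherent masks are set to the same width, the probe logic could use dma_set_mask_and_coherent(), which sets both in one call (this also changes the fallback behaviour slightly, since a 64-bit coherent failure would then fall back to 32-bit instead of aborting). Sketched against the variables used above:

	/* Hypothetical simplification of the probe path above. */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		high_dma = 1;		/* 64-bit DMA granted */
	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		ret = -ENOMEM;		/* neither 64- nor 32-bit accepted */
		goto _exit1;
	}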