hippi: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.

When memory is allocated in 'rr_init_one()', GFP_KERNEL can be used because
it is a probe function and no spinlock is taken in between.

When memory is allocated in 'rr_open()', GFP_KERNEL can be used because it
is a '.ndo_open' function (see struct net_device_ops) and no spinlock is
taken in between. '.ndo_open' functions are synchronized using the
rtnl_lock() semaphore.

@@
@@
-   PCI_DMA_BIDIRECTIONAL
+   DMA_BIDIRECTIONAL

@@
@@
-   PCI_DMA_TODEVICE
+   DMA_TO_DEVICE

@@
@@
-   PCI_DMA_FROMDEVICE
+   DMA_FROM_DEVICE

@@
@@
-   PCI_DMA_NONE
+   DMA_NONE

@@
expression e1, e2, e3;
@@
-   pci_alloc_consistent(e1, e2, e3)
+   dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-   pci_zalloc_consistent(e1, e2, e3)
+   dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-   pci_free_consistent(e1, e2, e3, e4)
+   dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-   pci_map_single(e1, e2, e3, e4)
+   dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-   pci_unmap_single(e1, e2, e3, e4)
+   dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-   pci_map_page(e1, e2, e3, e4, e5)
+   dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-   pci_unmap_page(e1, e2, e3, e4)
+   dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-   pci_map_sg(e1, e2, e3, e4)
+   dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-   pci_unmap_sg(e1, e2, e3, e4)
+   dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-   pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+   dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-   pci_dma_sync_single_for_device(e1, e2, e3, e4)
+   dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-   pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+   dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-   pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+   dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-   pci_dma_mapping_error(e1, e2)
+   dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-   pci_set_dma_mask(e1, e2)
+   dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-   pci_set_consistent_dma_mask(e1, e2)
+   dma_set_coherent_mask(&e1->dev, e2)

Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
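For illustration only, a minimal sketch of the coherent-allocation side of this
conversion, in the spirit of the rr_init_one()/rr_open() changes below. The
structure and identifiers (my_priv, RING_BYTES, my_alloc_ring, my_free_ring)
are hypothetical placeholders, not code from rrunner.c; only the
dma_alloc_coherent()/dma_free_coherent() calls and the GFP_KERNEL reasoning
mirror the patch.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical example state, not from the driver. */
struct my_priv {
	void *ring;
	dma_addr_t ring_dma;
};

#define RING_BYTES 4096	/* placeholder size */

static int my_alloc_ring(struct pci_dev *pdev, struct my_priv *priv)
{
	/*
	 * Old style (removed wrapper), which implied GFP_ATOMIC:
	 *   priv->ring = pci_alloc_consistent(pdev, RING_BYTES,
	 *                                     &priv->ring_dma);
	 *
	 * New style: the generic DMA API takes a struct device and an
	 * explicit gfp flag.  GFP_KERNEL is safe here because probe and
	 * .ndo_open run in process context with no spinlock held.
	 */
	priv->ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
					&priv->ring_dma, GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;
	return 0;
}

static void my_free_ring(struct pci_dev *pdev, struct my_priv *priv)
{
	dma_free_coherent(&pdev->dev, RING_BYTES, priv->ring, priv->ring_dma);
}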
parent 161c4e88b7
commit f33a7251c8
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -151,7 +151,8 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out;
 	}
 
-	tmpptr = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+	tmpptr = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+				    GFP_KERNEL);
 	rrpriv->tx_ring = tmpptr;
 	rrpriv->tx_ring_dma = ring_dma;
 
@@ -160,7 +161,8 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out;
 	}
 
-	tmpptr = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+	tmpptr = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+				    GFP_KERNEL);
 	rrpriv->rx_ring = tmpptr;
 	rrpriv->rx_ring_dma = ring_dma;
 
@@ -169,7 +171,8 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out;
 	}
 
-	tmpptr = pci_alloc_consistent(pdev, EVT_RING_SIZE, &ring_dma);
+	tmpptr = dma_alloc_coherent(&pdev->dev, EVT_RING_SIZE, &ring_dma,
+				    GFP_KERNEL);
 	rrpriv->evt_ring = tmpptr;
 	rrpriv->evt_ring_dma = ring_dma;
 
@@ -198,14 +201,14 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 out:
 	if (rrpriv->evt_ring)
-		pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring,
+		dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rrpriv->evt_ring,
 				    rrpriv->evt_ring_dma);
 	if (rrpriv->rx_ring)
-		pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
+		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rrpriv->rx_ring,
 				    rrpriv->rx_ring_dma);
 	if (rrpriv->tx_ring)
-		pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
+		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rrpriv->tx_ring,
 				    rrpriv->tx_ring_dma);
 	if (rrpriv->regs)
 		pci_iounmap(pdev, rrpriv->regs);
 	if (pdev)
@@ -228,12 +231,12 @@ static void rr_remove_one(struct pci_dev *pdev)
 	}
 
 	unregister_netdev(dev);
-	pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
+	dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rr->evt_ring,
 			    rr->evt_ring_dma);
-	pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
+	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rr->rx_ring,
 			    rr->rx_ring_dma);
-	pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
+	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rr->tx_ring,
 			    rr->tx_ring_dma);
 	pci_iounmap(pdev, rr->regs);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
@@ -648,8 +651,8 @@ static int rr_init1(struct net_device *dev)
 			goto error;
 		}
 		rrpriv->rx_skbuff[i] = skb;
-		addr = pci_map_single(rrpriv->pci_dev, skb->data,
-			dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
+		addr = dma_map_single(&rrpriv->pci_dev->dev, skb->data,
+				      dev->mtu + HIPPI_HLEN, DMA_FROM_DEVICE);
 		/*
 		 * Sanity test to see if we conflict with the DMA
 		 * limitations of the Roadrunner.
@@ -699,10 +702,10 @@ static int rr_init1(struct net_device *dev)
 		struct sk_buff *skb = rrpriv->rx_skbuff[i];
 
 		if (skb) {
-			pci_unmap_single(rrpriv->pci_dev,
+			dma_unmap_single(&rrpriv->pci_dev->dev,
 					 rrpriv->rx_ring[i].addr.addrlo,
 					 dev->mtu + HIPPI_HLEN,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			rrpriv->rx_ring[i].size = 0;
 			set_rraddr(&rrpriv->rx_ring[i].addr, 0);
 			dev_kfree_skb(skb);
@@ -953,18 +956,18 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
 					dev->stats.rx_dropped++;
 					goto defer;
 				} else {
-					pci_dma_sync_single_for_cpu(rrpriv->pci_dev,
+					dma_sync_single_for_cpu(&rrpriv->pci_dev->dev,
 								    desc->addr.addrlo,
 								    pkt_len,
-								    PCI_DMA_FROMDEVICE);
+								    DMA_FROM_DEVICE);
 
 					skb_put_data(skb, rx_skb->data,
 						     pkt_len);
 
-					pci_dma_sync_single_for_device(rrpriv->pci_dev,
+					dma_sync_single_for_device(&rrpriv->pci_dev->dev,
 								       desc->addr.addrlo,
 								       pkt_len,
-								       PCI_DMA_FROMDEVICE);
+								       DMA_FROM_DEVICE);
 				}
 			}else{
 				struct sk_buff *newskb;
@@ -974,16 +977,17 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
 				if (newskb){
 					dma_addr_t addr;
 
-					pci_unmap_single(rrpriv->pci_dev,
-							 desc->addr.addrlo, dev->mtu +
-							 HIPPI_HLEN, PCI_DMA_FROMDEVICE);
+					dma_unmap_single(&rrpriv->pci_dev->dev,
+							 desc->addr.addrlo,
+							 dev->mtu + HIPPI_HLEN,
+							 DMA_FROM_DEVICE);
 					skb = rx_skb;
 					skb_put(skb, pkt_len);
 					rrpriv->rx_skbuff[index] = newskb;
-					addr = pci_map_single(rrpriv->pci_dev,
+					addr = dma_map_single(&rrpriv->pci_dev->dev,
 							      newskb->data,
 							      dev->mtu + HIPPI_HLEN,
-							      PCI_DMA_FROMDEVICE);
+							      DMA_FROM_DEVICE);
 					set_rraddr(&desc->addr, addr);
 				} else {
 					printk("%s: Out of memory, deferring "
@@ -1068,9 +1072,9 @@ static irqreturn_t rr_interrupt(int irq, void *dev_id)
 			dev->stats.tx_packets++;
 			dev->stats.tx_bytes += skb->len;
 
-			pci_unmap_single(rrpriv->pci_dev,
+			dma_unmap_single(&rrpriv->pci_dev->dev,
 					 desc->addr.addrlo, skb->len,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 			dev_kfree_skb_irq(skb);
 
 			rrpriv->tx_skbuff[txcon] = NULL;
@@ -1110,8 +1114,9 @@ static inline void rr_raz_tx(struct rr_private *rrpriv,
 		if (skb) {
 			struct tx_desc *desc = &(rrpriv->tx_ring[i]);
 
-			pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
-					 skb->len, PCI_DMA_TODEVICE);
+			dma_unmap_single(&rrpriv->pci_dev->dev,
+					 desc->addr.addrlo, skb->len,
+					 DMA_TO_DEVICE);
 			desc->size = 0;
 			set_rraddr(&desc->addr, 0);
 			dev_kfree_skb(skb);
@@ -1132,8 +1137,10 @@ static inline void rr_raz_rx(struct rr_private *rrpriv,
 		if (skb) {
 			struct rx_desc *desc = &(rrpriv->rx_ring[i]);
 
-			pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
-					 dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&rrpriv->pci_dev->dev,
+					 desc->addr.addrlo,
+					 dev->mtu + HIPPI_HLEN,
+					 DMA_FROM_DEVICE);
 			desc->size = 0;
 			set_rraddr(&desc->addr, 0);
 			dev_kfree_skb(skb);
@@ -1188,17 +1195,17 @@ static int rr_open(struct net_device *dev)
 		goto error;
 	}
 
-	rrpriv->rx_ctrl = pci_alloc_consistent(pdev,
+	rrpriv->rx_ctrl = dma_alloc_coherent(&pdev->dev,
 					     256 * sizeof(struct ring_ctrl),
-					     &dma_addr);
+					     &dma_addr, GFP_KERNEL);
 	if (!rrpriv->rx_ctrl) {
 		ecode = -ENOMEM;
 		goto error;
 	}
 	rrpriv->rx_ctrl_dma = dma_addr;
 
-	rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info),
-					    &dma_addr);
+	rrpriv->info = dma_alloc_coherent(&pdev->dev, sizeof(struct rr_info),
+					  &dma_addr, GFP_KERNEL);
 	if (!rrpriv->info) {
 		ecode = -ENOMEM;
 		goto error;
@@ -1237,13 +1244,13 @@ static int rr_open(struct net_device *dev)
 	spin_unlock_irqrestore(&rrpriv->lock, flags);
 
 	if (rrpriv->info) {
-		pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
-				    rrpriv->info_dma);
+		dma_free_coherent(&pdev->dev, sizeof(struct rr_info),
+				  rrpriv->info, rrpriv->info_dma);
 		rrpriv->info = NULL;
 	}
 	if (rrpriv->rx_ctrl) {
-		pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
+		dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
 				    rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
 		rrpriv->rx_ctrl = NULL;
 	}
 
@@ -1365,12 +1372,12 @@ static int rr_close(struct net_device *dev)
 	rr_raz_tx(rrpriv, dev);
 	rr_raz_rx(rrpriv, dev);
 
-	pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
+	dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
 			    rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
 	rrpriv->rx_ctrl = NULL;
 
-	pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
+	dma_free_coherent(&pdev->dev, sizeof(struct rr_info), rrpriv->info,
 			    rrpriv->info_dma);
 	rrpriv->info = NULL;
 
 	spin_unlock_irqrestore(&rrpriv->lock, flags);
@@ -1430,8 +1437,8 @@ static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
 	index = txctrl->pi;
 
 	rrpriv->tx_skbuff[index] = skb;
-	set_rraddr(&rrpriv->tx_ring[index].addr, pci_map_single(
-		rrpriv->pci_dev, skb->data, len + 8, PCI_DMA_TODEVICE));
+	set_rraddr(&rrpriv->tx_ring[index].addr,
+		   dma_map_single(&rrpriv->pci_dev->dev, skb->data, len + 8, DMA_TO_DEVICE));
 	rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
 	rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
 	txctrl->pi = (index + 1) % TX_RING_ENTRIES;
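For completeness, a minimal sketch of the streaming-mapping side of the
conversion used in the TX paths above (rr_start_xmit()/rr_interrupt()). The
helper names (my_xmit_map, my_xmit_unmap) are hypothetical and not part of the
driver; note that, unlike rrunner.c itself, the sketch adds a
dma_mapping_error() check, which the generic 'dma_' API provides directly.

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/*
 * Map an skb for device reads (TX), then unmap it after completion.
 * 'pdev' is the PCI device doing the DMA; the direction constants come
 * from the generic API (DMA_TO_DEVICE replaces PCI_DMA_TODEVICE).
 */
static int my_xmit_map(struct pci_dev *pdev, struct sk_buff *skb,
		       dma_addr_t *mapping)
{
	*mapping = dma_map_single(&pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	/* Not done in rrunner.c, but recommended with the dma_ API. */
	if (dma_mapping_error(&pdev->dev, *mapping))
		return -ENOMEM;
	return 0;
}

static void my_xmit_unmap(struct pci_dev *pdev, struct sk_buff *skb,
			  dma_addr_t mapping)
{
	dma_unmap_single(&pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
}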