ibmveth: Use dma_alloc_coherent() instead of kmalloc/dma_map_single()

Replacing kmalloc()/kfree()/dma_map_single()/dma_unmap_single()
with dma_alloc_coherent()/dma_free_coherent() reduces code size
and simplifies the code; in addition, coherent DMA avoids
flushing the cache on every access.

Signed-off-by: Cai Huoqing <caihuoqing@baidu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Cai Huoqing 2021-09-26 14:52:14 +08:00 committed by David S. Miller
parent f947fcaffd
commit 4247ef0269
1 changed file with 9 additions and 16 deletions

View File

@ -605,17 +605,13 @@ static int ibmveth_open(struct net_device *netdev)
}
rc = -ENOMEM;
adapter->bounce_buffer =
kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
if (!adapter->bounce_buffer)
goto out_free_irq;
adapter->bounce_buffer_dma =
dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
netdev_err(netdev, "unable to map bounce buffer\n");
goto out_free_bounce_buffer;
adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev,
netdev->mtu + IBMVETH_BUFF_OH,
&adapter->bounce_buffer_dma, GFP_KERNEL);
if (!adapter->bounce_buffer) {
netdev_err(netdev, "unable to alloc bounce buffer\n");
goto out_free_irq;
}
netdev_dbg(netdev, "initial replenish cycle\n");
@ -627,8 +623,6 @@ static int ibmveth_open(struct net_device *netdev)
return 0;
out_free_bounce_buffer:
kfree(adapter->bounce_buffer);
out_free_irq:
free_irq(netdev->irq, netdev);
out_free_buffer_pools:
@ -702,10 +696,9 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
adapter->netdev->mtu + IBMVETH_BUFF_OH,
DMA_BIDIRECTIONAL);
kfree(adapter->bounce_buffer);
dma_free_coherent(&adapter->vdev->dev,
adapter->netdev->mtu + IBMVETH_BUFF_OH,
adapter->bounce_buffer, adapter->bounce_buffer_dma);
netdev_dbg(netdev, "close complete\n");