ibmveth: Fix alignment of rx queue bug

This patch fixes a bug found by Nish Aravamudan
(https://lkml.org/lkml/2012/5/15/220) where the driver does not follow
the spec (it does not align the rx queue on a 16-byte boundary), so the
hypervisor aborts the registration, making the device unusable.

The fix follows BenH's recommendation (https://lkml.org/lkml/2012/7/20/461)
to replace the kmalloc+map combination with a single call to
dma_alloc_coherent(), which always returns a buffer aligned on at least a
16-byte boundary.

The stable trees will run into this bug whenever the kmalloc() call for the
rx queue returns a buffer that is not aligned on a 16-byte boundary.
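In essence, the change swaps the two-step kmalloc()+dma_map_single()
allocation for the coherent DMA API. A minimal sketch of the before/after
pattern follows; the helper names (rxq_alloc/rxq_free) and standalone
parameters are illustrative only, not the driver's actual code, which keeps
these fields in its ibmveth_adapter structure.

/*
 * Minimal sketch of the allocation change; rxq_alloc/rxq_free are
 * hypothetical helpers used only to illustrate the pattern.
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>

static void *rxq_alloc(struct device *dev, size_t queue_len,
		       dma_addr_t *queue_dma)
{
	/*
	 * Old scheme: kmalloc() + dma_map_single().  kmalloc() does not
	 * guarantee a 16-byte aligned buffer, so the hypervisor can
	 * reject the queue registration:
	 *
	 *	addr = kmalloc(queue_len, GFP_KERNEL);
	 *	*queue_dma = dma_map_single(dev, addr, queue_len,
	 *				    DMA_BIDIRECTIONAL);
	 *
	 * New scheme: a single call returns both the CPU pointer and the
	 * DMA handle, with alignment that satisfies the 16-byte
	 * requirement.
	 */
	return dma_alloc_coherent(dev, queue_len, queue_dma, GFP_KERNEL);
}

static void rxq_free(struct device *dev, size_t queue_len,
		     void *queue_addr, dma_addr_t queue_dma)
{
	/* dma_free_coherent() replaces dma_unmap_single() + kfree(). */
	dma_free_coherent(dev, queue_len, queue_addr, queue_dma);
}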

Cc: <stable@vger.kernel.org>
Signed-off-by: Santiago Leon <santil@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Santiago Leon, 2012-09-04 14:41:37 +0000, committed by David S. Miller
parent d013ef2aba
commit d90c92fee8
1 changed file with 9 additions and 17 deletions

@@ -472,14 +472,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 	}
 
 	if (adapter->rx_queue.queue_addr != NULL) {
-		if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
-			dma_unmap_single(dev,
-					adapter->rx_queue.queue_dma,
-					adapter->rx_queue.queue_len,
-					DMA_BIDIRECTIONAL);
-			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
-		}
-		kfree(adapter->rx_queue.queue_addr);
+		dma_free_coherent(dev, adapter->rx_queue.queue_len,
+				  adapter->rx_queue.queue_addr,
+				  adapter->rx_queue.queue_dma);
 		adapter->rx_queue.queue_addr = NULL;
 	}
 
@@ -556,10 +551,13 @@ static int ibmveth_open(struct net_device *netdev)
 		goto err_out;
 	}
 
+	dev = &adapter->vdev->dev;
+
 	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
 						rxq_entries;
-	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
-						GFP_KERNEL);
+	adapter->rx_queue.queue_addr =
+		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
+				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
 
 	if (!adapter->rx_queue.queue_addr) {
 		netdev_err(netdev, "unable to allocate rx queue pages\n");
@@ -567,19 +565,13 @@ static int ibmveth_open(struct net_device *netdev)
 		goto err_out;
 	}
 
-	dev = &adapter->vdev->dev;
-
 	adapter->buffer_list_dma = dma_map_single(dev,
 			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
 	adapter->filter_list_dma = dma_map_single(dev,
 			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-	adapter->rx_queue.queue_dma = dma_map_single(dev,
-			adapter->rx_queue.queue_addr,
-			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 
 	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
-	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
-	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
+	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
 		netdev_err(netdev, "unable to map filter or buffer list "
 			   "pages\n");
 		rc = -ENOMEM;