iavf: Fix handling of dummy receive descriptors
Fix a memory leak caused by not handling dummy receive descriptors
properly. iavf_get_rx_buffer() now returns the rx_buffer pointer even for
dummy receive descriptors instead of NULL. Without this patch, when the
hardware writes a dummy descriptor, iavf would not free the page allocated
for the previous receive buffer. This is an unlikely event but can still
happen.
[Jesse: massaged commit message]
Fixes: efa14c3985 ("iavf: allow null RX descriptors")
Signed-off-by: Przemyslaw Patynowski <przemyslawx.patynowski@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
commit a9f49e0060
parent 4635fd3a9d
@@ -1285,11 +1285,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
 {
 	struct iavf_rx_buffer *rx_buffer;
 
-	if (!size)
-		return NULL;
-
 	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
 	prefetchw(rx_buffer->page);
+	if (!size)
+		return rx_buffer;
 
 	/* we are reusing so sync this buffer for CPU use */
 	dma_sync_single_range_for_cpu(rx_ring->dev,
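For illustration, below is a small, compilable sketch of why the return
value matters; the rx_ring, get_rx_buffer() and put_rx_buffer() names and
types are simplified stand-ins invented for this example, not the actual
iavf code. It only shows that when the lookup helper returns NULL for a
zero-size (dummy) descriptor, the clean loop skips the put/recycle step and
the page tied to that ring slot is never released, whereas returning the
buffer lets the caller free or recycle it as usual.

/*
 * Simplified stand-in for a receive clean path (not the real iavf code).
 * A dummy descriptor carries size == 0; the fix is to still hand back the
 * ring slot so the caller can release its page.
 */
#include <stdio.h>
#include <stdlib.h>

struct rx_buffer {
	void *page;			/* stand-in for the DMA'd page */
};

struct rx_ring {
	struct rx_buffer bufs[4];
	unsigned int next_to_clean;
};

/* Post-fix behaviour: always return the slot at next_to_clean. */
static struct rx_buffer *get_rx_buffer(struct rx_ring *ring, unsigned int size)
{
	struct rx_buffer *rx_buffer = &ring->bufs[ring->next_to_clean];

	if (!size)
		return rx_buffer;	/* dummy descriptor: nothing to sync */

	/* ... a real driver would sync the buffer for CPU use here ... */
	return rx_buffer;
}

/* Release the page tied to a ring slot. */
static void put_rx_buffer(struct rx_buffer *rx_buffer)
{
	free(rx_buffer->page);
	rx_buffer->page = NULL;
}

int main(void)
{
	struct rx_ring ring = { .next_to_clean = 0 };
	struct rx_buffer *rx_buffer;

	ring.bufs[0].page = malloc(4096);

	/* Hardware wrote a dummy descriptor: size == 0. */
	rx_buffer = get_rx_buffer(&ring, 0);

	/*
	 * With the old "return NULL" behaviour this branch was skipped,
	 * so the page in slot 0 was never freed or recycled.
	 */
	if (rx_buffer)
		put_rx_buffer(rx_buffer);

	printf("page after clean: %p\n", ring.bufs[0].page);
	return 0;
}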