amd-xgbe: Fix DMA API debug warning
When running a kernel configured with CONFIG_DMA_API_DEBUG=y, a warning is issued:

  DMA-API: device driver tries to sync DMA memory it has not allocated

This warning is the result of mapping the full range of the allocated Rx buffer pages and then performing a dma_sync_single_for_cpu against a calculated DMA address. The proper fix is to use dma_sync_single_range_for_cpu with a base DMA address and an offset.

Reported-by: Kim Phillips <kim.phillips@arm.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Tested-by: Kim Phillips <kim.phillips@arm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit cfbfd86bfd
parent 95ec655bc4
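For context, the difference between the two sync calls can be sketched outside the driver. The snippet below is only an illustration of the pattern the patch moves to; the device pointer, base address, offset, and length names are hypothetical, not xgbe identifiers.

#include <linux/dma-mapping.h>

/* Illustration only: one page set is mapped once with dma_map_page(),
 * yielding pages_dma; each Rx buffer is a sub-range of that single
 * mapping, starting at pages_offset and buf_len bytes long.
 */
static void rx_buf_sync_sketch(struct device *dev, dma_addr_t pages_dma,
                               unsigned long pages_offset, size_t buf_len)
{
        /* Old pattern: sync a computed interior address.  dma-debug
         * tracks the mapping by its base address, so the calculated
         * address can be reported as memory the driver "has not
         * allocated" (the warning quoted above):
         *
         *   dma_sync_single_for_cpu(dev, pages_dma + pages_offset,
         *                           buf_len, DMA_FROM_DEVICE);
         */

        /* New pattern: pass the base address plus an explicit offset so
         * the DMA API can resolve the original mapping and sync only
         * this buffer's bytes.
         */
        dma_sync_single_range_for_cpu(dev, pages_dma, pages_offset,
                                      buf_len, DMA_FROM_DEVICE);
}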
@@ -303,7 +303,8 @@ static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
         get_page(pa->pages);
         bd->pa = *pa;
 
-        bd->dma = pa->pages_dma + pa->pages_offset;
+        bd->dma_base = pa->pages_dma;
+        bd->dma_off = pa->pages_offset;
         bd->dma_len = len;
 
         pa->pages_offset += len;
@@ -1110,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
         unsigned int rx_usecs = pdata->rx_usecs;
         unsigned int rx_frames = pdata->rx_frames;
         unsigned int inte;
+        dma_addr_t hdr_dma, buf_dma;
 
         if (!rx_usecs && !rx_frames) {
                 /* No coalescing, interrupt for every descriptor */
@@ -1129,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
          * Set buffer 2 (hi) address to buffer dma address (hi) and
          * set control bits OWN and INTE
          */
-        rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
-        rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
-        rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
-        rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
+        hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
+        buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
+        rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+        rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+        rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+        rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
 
         XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
 
@@ -1765,8 +1765,9 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
         /* Start with the header buffer which may contain just the header
          * or the header plus data
          */
-        dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
-                                rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+        dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
+                                      rdata->rx.hdr.dma_off,
+                                      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
 
         packet = page_address(rdata->rx.hdr.pa.pages) +
                  rdata->rx.hdr.pa.pages_offset;
@@ -1778,8 +1779,11 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
                 len -= copy_len;
                 if (len) {
                         /* Add the remaining data as a frag */
-                        dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
-                                                rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
+                        dma_sync_single_range_for_cpu(pdata->dev,
+                                                      rdata->rx.buf.dma_base,
+                                                      rdata->rx.buf.dma_off,
+                                                      rdata->rx.buf.dma_len,
+                                                      DMA_FROM_DEVICE);
 
                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                         rdata->rx.buf.pa.pages,
@@ -1945,8 +1949,9 @@ read_again:
                         if (!skb)
                                 error = 1;
                 } else if (rdesc_len) {
-                        dma_sync_single_for_cpu(pdata->dev,
-                                                rdata->rx.buf.dma,
+                        dma_sync_single_range_for_cpu(pdata->dev,
+                                                      rdata->rx.buf.dma_base,
+                                                      rdata->rx.buf.dma_off,
                                                 rdata->rx.buf.dma_len,
                                                 DMA_FROM_DEVICE);
 
@@ -337,7 +337,8 @@ struct xgbe_buffer_data {
         struct xgbe_page_alloc pa;
         struct xgbe_page_alloc pa_unmap;
 
-        dma_addr_t dma;
+        dma_addr_t dma_base;
+        unsigned long dma_off;
         unsigned int dma_len;
 };
 
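Taken together, the change replaces a single precomputed dma address with a (dma_base, dma_off) pair, so the same fields can feed both descriptor programming and the partial sync. A rough sketch of that usage follows, with hypothetical names rather than the driver's own structures.

#include <linux/dma-mapping.h>

/* Hypothetical mirror of the split fields above, for illustration only. */
struct rx_subbuf {
        dma_addr_t dma_base;    /* address returned when the pages were mapped */
        unsigned long dma_off;  /* start of this buffer within that mapping */
        unsigned int dma_len;   /* length of this buffer */
};

/* The hardware descriptor still needs the absolute bus address. */
static dma_addr_t rx_subbuf_device_addr(const struct rx_subbuf *b)
{
        return b->dma_base + b->dma_off;
}

/* CPU access syncs only this buffer's slice of the page mapping. */
static void rx_subbuf_sync_for_cpu(struct device *dev,
                                   const struct rx_subbuf *b)
{
        dma_sync_single_range_for_cpu(dev, b->dma_base, b->dma_off,
                                      b->dma_len, DMA_FROM_DEVICE);
}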