Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next
Ben Hutchings says:

====================
A single fix by Alexandre Rames for the recent changes to TSO.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 853b185ac9
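A brief note on the change before the diff: the old unmap path recovered the start of a DMA mapping as dma_addr + len - unmap_len, which is only correct when a fragment ends exactly at the end of the mapping it owns. The recent TSO changes introduced header buffers that sit at the *start* of a longer mapping, so that formula pointed below the true mapping base. The patch instead records the offset of dma_addr from the mapping base in a new dma_offset field. A minimal standalone sketch of the two schemes, assuming a hypothetical header buffer (the struct, type stand-in, and values below are illustrative, not the driver's):

	#include <assert.h>

	typedef unsigned long long dma_addr_t;	/* stand-in for the kernel type */

	struct tx_buf {				/* stripped-down efx_tx_buffer */
		dma_addr_t dma_addr;		/* start of this fragment's data */
		unsigned short len;		/* length of this fragment */
		unsigned short unmap_len;	/* length of the mapping it owns */
		unsigned short dma_offset;	/* dma_addr minus the mapping base */
	};

	/* Old scheme: assumes the fragment ends where the mapping ends. */
	static dma_addr_t unmap_addr_old(const struct tx_buf *b)
	{
		return b->dma_addr + b->len - b->unmap_len;
	}

	/* New scheme: valid wherever the fragment sits inside the mapping. */
	static dma_addr_t unmap_addr_new(const struct tx_buf *b)
	{
		return b->dma_addr - b->dma_offset;
	}

	int main(void)
	{
		/* Hypothetical TSO header buffer: it starts at the base of a
		 * 192-byte mapping but carries only 54 bytes of headers, as
		 * in the tso_start_new_packet() hunk (dma_offset = 0). */
		struct tx_buf hdr = {
			.dma_addr = 0x1000, .len = 54,
			.unmap_len = 192, .dma_offset = 0,
		};

		assert(unmap_addr_new(&hdr) == 0x1000);		/* mapping base */
		assert(unmap_addr_old(&hdr) == 0x1000 - 138);	/* 138 bytes short */
		return 0;
	}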
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -141,6 +141,8 @@ struct efx_special_buffer {
  * @len: Length of this fragment.
  *	This field is zero when the queue slot is empty.
  * @unmap_len: Length of this fragment to unmap
+ * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
+ *	Only valid if @unmap_len != 0.
  */
 struct efx_tx_buffer {
 	union {
@@ -154,6 +156,7 @@ struct efx_tx_buffer {
 	unsigned short flags;
 	unsigned short len;
 	unsigned short unmap_len;
+	unsigned short dma_offset;
 };
 #define EFX_TX_BUF_CONT	1	/* not last descriptor of packet */
 #define EFX_TX_BUF_SKB	2	/* buffer is last part of skb */
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -65,8 +65,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 {
 	if (buffer->unmap_len) {
 		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
-		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
-					 buffer->unmap_len);
+		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
 		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
 			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
 					 DMA_TO_DEVICE);
@@ -414,6 +413,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 		/* Transfer ownership of the unmapping to the final buffer */
 		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
 		buffer->unmap_len = unmap_len;
+		buffer->dma_offset = buffer->dma_addr - unmap_addr;
 		unmap_len = 0;

 		/* Get address and size of next fragment */
@@ -980,6 +980,7 @@ static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
 			return -ENOMEM;
 		}
 		buffer->unmap_len = buffer->len;
+		buffer->dma_offset = 0;
 		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
 	}

@@ -1121,6 +1122,7 @@ static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
 	if (st->in_len == 0) {
 		/* Transfer ownership of the DMA mapping */
 		buffer->unmap_len = st->unmap_len;
+		buffer->dma_offset = buffer->unmap_len - buffer->len;
 		buffer->flags |= st->dma_flags;
 		st->unmap_len = 0;
 	}
@@ -1219,6 +1221,7 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 		if (is_last) {
 			buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
 			buffer->unmap_len = st->header_unmap_len;
+			buffer->dma_offset = 0;
 			/* Ensure we only unmap them once in case of a
 			 * later DMA mapping error and rollback
 			 */
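For reference, the four assignment sites above all maintain the same invariant: the mapping base is recoverable as dma_addr - dma_offset, which is what efx_dequeue_buffer() now relies on. A small illustrative check of each site; the base address, lengths, and offsets are made-up values, not taken from the driver:

	#include <assert.h>

	typedef unsigned long long dma_addr_t;	/* stand-in for the kernel type */

	int main(void)
	{
		const dma_addr_t base = 0x2000;	/* hypothetical mapping base */
		const unsigned short unmap_len = 256, len = 64;
		dma_addr_t dma_addr;
		unsigned short dma_offset;

		/* efx_enqueue_skb(): general case, offset computed directly
		 * from the mapped address (unmap_addr in the driver). */
		dma_addr = base + 100;
		dma_offset = dma_addr - base;
		assert(dma_addr - dma_offset == base);

		/* efx_tso_put_header(): the buffer covers the whole mapping
		 * (unmap_len == len), so the offset is zero. */
		dma_addr = base;
		dma_offset = 0;
		assert(dma_addr - dma_offset == base);

		/* tso_fill_packet_with_fragment(): the fragment ends at the
		 * end of the mapping, hence offset = unmap_len - len. */
		dma_addr = base + unmap_len - len;
		dma_offset = unmap_len - len;
		assert(dma_addr - dma_offset == base);

		/* tso_start_new_packet(): the header starts at the mapping
		 * base but len may be smaller than unmap_len; dma_offset = 0
		 * is the case the old formula could not express. */
		dma_addr = base;
		dma_offset = 0;
		assert(dma_addr - dma_offset == base);
		return 0;
	}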