net/mlx4_en: Fix handling of dma_map failure
Result of skb_frag_dma_map() and dma_map_single() wasn't checked. Added
a check and proper handling in case of failure. Moved the mapping to the
beginning of mlx4_en_xmit(), before updating the ring data structure, to
make error handling easier.

Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 237a3a3b15
parent bd2f631d7c
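The fix follows the usual netdev convention for transmit-path failures: a packet that cannot be mapped is dropped, the skb is freed, and the handler still returns NETDEV_TX_OK so the stack does not requeue it. A minimal sketch of that shape, assuming a hypothetical driver whose private struct carries a `ddev` device pointer and a stats block (illustrative only, not code from this patch):

```c
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>

/* Hypothetical private struct; field names are illustrative. */
struct example_priv {
	struct device *ddev;
	struct net_device_stats stats;
};

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	dma_addr_t dma;

	dma = dma_map_single(priv->ddev, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	/* Every dma_map_*() result must be checked before use. */
	if (dma_mapping_error(priv->ddev, dma))
		goto tx_drop;

	/* ... build the descriptor, ring the doorbell ... */
	return NETDEV_TX_OK;

tx_drop:
	dev_kfree_skb_any(skb);
	priv->stats.tx_dropped++;
	/* NETDEV_TX_OK on drop: returning busy would make the core retry. */
	return NETDEV_TX_OK;
}
```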
@@ -588,6 +588,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct device *ddev = priv->ddev;
 	struct mlx4_en_tx_ring *ring;
 	struct mlx4_en_tx_desc *tx_desc;
 	struct mlx4_wqe_data_seg *data;
@@ -674,6 +675,56 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_info->skb = skb;
 	tx_info->nr_txbb = nr_txbb;
 
+	if (lso_header_size)
+		data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4,
+						      DS_SIZE));
+	else
+		data = &tx_desc->data;
+
+	/* valid only for none inline segments */
+	tx_info->data_offset = (void *)data - (void *)tx_desc;
+
+	tx_info->linear = (lso_header_size < skb_headlen(skb) &&
+			   !is_inline(skb, NULL)) ? 1 : 0;
+
+	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
+
+	if (is_inline(skb, &fragptr)) {
+		tx_info->inl = 1;
+	} else {
+		/* Map fragments */
+		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+			frag = &skb_shinfo(skb)->frags[i];
+			dma = skb_frag_dma_map(ddev, frag,
+					       0, skb_frag_size(frag),
+					       DMA_TO_DEVICE);
+			if (dma_mapping_error(ddev, dma))
+				goto tx_drop_unmap;
+
+			data->addr = cpu_to_be64(dma);
+			data->lkey = cpu_to_be32(mdev->mr.key);
+			wmb();
+			data->byte_count = cpu_to_be32(skb_frag_size(frag));
+			--data;
+		}
+
+		/* Map linear part */
+		if (tx_info->linear) {
+			u32 byte_count = skb_headlen(skb) - lso_header_size;
+			dma = dma_map_single(ddev, skb->data +
+					     lso_header_size, byte_count,
+					     PCI_DMA_TODEVICE);
+			if (dma_mapping_error(ddev, dma))
+				goto tx_drop_unmap;
+
+			data->addr = cpu_to_be64(dma);
+			data->lkey = cpu_to_be32(mdev->mr.key);
+			wmb();
+			data->byte_count = cpu_to_be32(byte_count);
+		}
+		tx_info->inl = 0;
+	}
+
 	/*
 	 * For timestamping add flag to skb_shinfo and
 	 * set flag for further reference
@@ -720,8 +771,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Copy headers;
 		 * note that we already verified that it is linear */
 		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
-		data = ((void *) &tx_desc->lso +
-			ALIGN(lso_header_size + 4, DS_SIZE));
 
 		priv->port_stats.tso_packets++;
 		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
@@ -733,7 +782,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
 			((ring->prod & ring->size) ?
 			 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
-		data = &tx_desc->data;
 		tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
 		ring->packets++;
 
@@ -742,38 +790,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
 	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
-
-	/* valid only for none inline segments */
-	tx_info->data_offset = (void *) data - (void *) tx_desc;
-
-	tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
-	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
-
-	if (!is_inline(skb, &fragptr)) {
-		/* Map fragments */
-		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
-			frag = &skb_shinfo(skb)->frags[i];
-			dma = skb_frag_dma_map(priv->ddev, frag,
-					       0, skb_frag_size(frag),
-					       DMA_TO_DEVICE);
-			data->addr = cpu_to_be64(dma);
-			data->lkey = cpu_to_be32(mdev->mr.key);
-			wmb();
-			data->byte_count = cpu_to_be32(skb_frag_size(frag));
-			--data;
-		}
-
-		/* Map linear part */
-		if (tx_info->linear) {
-			dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
-					     skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
-			data->addr = cpu_to_be64(dma);
-			data->lkey = cpu_to_be32(mdev->mr.key);
-			wmb();
-			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
-		}
-		tx_info->inl = 0;
-	} else {
+	if (tx_info->inl) {
 		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
 		tx_info->inl = 1;
 	}
@@ -813,6 +830,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	return NETDEV_TX_OK;
 
+tx_drop_unmap:
+	en_err(priv, "DMA mapping error\n");
+
+	for (i++; i < skb_shinfo(skb)->nr_frags; i++) {
+		data++;
+		dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr),
+			       be32_to_cpu(data->byte_count),
+			       PCI_DMA_TODEVICE);
+	}
+
 tx_drop:
 	dev_kfree_skb_any(skb);
 	priv->stats.tx_dropped++;
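A detail worth calling out in the unwind loop above: skb_frag_dma_map() is a wrapper around dma_map_page(), so dma_unmap_page() is the matching release for the fragment mappings, and the linear part never needs undoing here because it is mapped last, after every fragment has already been checked. A sketch of that map/unmap pairing, as a hypothetical helper taking a device pointer and one fragment (illustrative, not from the patch):

```c
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical helper: map one skb fragment and immediately release it,
 * showing the pairing the error path depends on. */
static int example_map_unmap_frag(struct device *ddev, skb_frag_t *frag)
{
	dma_addr_t dma = skb_frag_dma_map(ddev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);

	if (dma_mapping_error(ddev, dma))
		return -ENOMEM;

	/* skb_frag_dma_map() maps a page, so dma_unmap_page() undoes it. */
	dma_unmap_page(ddev, dma, skb_frag_size(frag), DMA_TO_DEVICE);
	return 0;
}
```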