diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 9d81e7a48dba..a7a67376615b 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1239,19 +1239,9 @@ static int gfar_enet_open(struct net_device *dev)
 	return err;
 }
 
-static inline struct txfcb *gfar_add_fcb(struct sk_buff **skbp)
+static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
 {
-	struct txfcb *fcb;
-	struct sk_buff *skb = *skbp;
-
-	if (unlikely(skb_headroom(skb) < GMAC_FCB_LEN)) {
-		struct sk_buff *old_skb = skb;
-		skb = skb_realloc_headroom(old_skb, GMAC_FCB_LEN);
-		if (!skb)
-			return NULL;
-		dev_kfree_skb_any(old_skb);
-	}
-	fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
+	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
 	cacheable_memzero(fcb, GMAC_FCB_LEN);
 
 	return fcb;
@@ -1320,6 +1310,20 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	base = priv->tx_bd_base;
 
+	/* make space for additional header */
+	if (skb_headroom(skb) < GMAC_FCB_LEN) {
+		struct sk_buff *skb_new;
+
+		skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
+		if (!skb_new) {
+			dev->stats.tx_errors++;
+			kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+		kfree_skb(skb);
+		skb = skb_new;
+	}
+
 	/* total number of fragments in the SKB */
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
@@ -1372,20 +1376,18 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Set up checksumming */
 	if (CHECKSUM_PARTIAL == skb->ip_summed) {
-		fcb = gfar_add_fcb(&skb);
-		if (likely(fcb != NULL)) {
-			lstatus |= BD_LFLAG(TXBD_TOE);
-			gfar_tx_checksum(skb, fcb);
-		}
+		fcb = gfar_add_fcb(skb);
+		lstatus |= BD_LFLAG(TXBD_TOE);
+		gfar_tx_checksum(skb, fcb);
 	}
 
 	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
-		if (unlikely(NULL == fcb))
-			fcb = gfar_add_fcb(&skb);
-		if (likely(fcb != NULL)) {
+		if (unlikely(NULL == fcb)) {
+			fcb = gfar_add_fcb(skb);
 			lstatus |= BD_LFLAG(TXBD_TOE);
-			gfar_tx_vlan(skb, fcb);
 		}
+
+		gfar_tx_vlan(skb, fcb);
 	}
 
 	/* setup the TxBD length and buffer pointer for the first BD */
@@ -1433,7 +1435,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Unlock priv */
 	spin_unlock_irqrestore(&priv->txlock, flags);
 
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 /* Stops the kernel queue, and halts the controller */