net: fec: better implementation of iMX6 ERR006358 quirk
Using a (delayed) workqueue for ERR006358 is not correct - a work queue is a single-trigger device. Once the work queue has been scheduled, it can't be re-scheduled until it has been run. This can cause problems - with an appropriate packet timing, we can end up with packets queued but not sent by the hardware, resulting in the transmit timeout firing.

Re-implement this as per the workaround detailed in the ERR006358 documentation - if there are packets waiting to be sent when we service the transmit ring, and we see that the transmitter is not running, kick the transmitter to run the pending entries in the ring.

Testing here with a 10Mbit half-duplex link sees the resulting iperf TCP bandwidth increase from between 1 and 2 Mbps to between 8 and 9 Mbps.

Acked-by: Fugang Duan <B38611@freescale.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
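In code terms, the workaround amounts to the check added at the end of the TX completion path; a condensed sketch of the hunk added to fec_enet_tx() in the diff below, using the driver's existing fep/bdp naming and register accessors:

	/* At the end of TX completion processing: "bdp" is the descriptor we
	 * stopped cleaning at and fep->cur_tx is the last one queued; if they
	 * differ, packets are still pending in the ring.  If the transmitter
	 * has meanwhile gone idle (TDAR reads back as zero), we have hit
	 * ERR006358: write TDAR again so the FEC rescans the TX ring.
	 */
	if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
		writel(0, fep->hwp + FEC_X_DES_ACTIVE);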
commit ccea296839
parent efa95b01da
drivers/net/ethernet/freescale/fec.h
@@ -259,7 +259,6 @@ struct bufdesc_ex {
 struct fec_enet_delayed_work {
	struct delayed_work delay_work;
	bool timeout;
-	bool trig_tx;
 };
 
 /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
drivers/net/ethernet/freescale/fec_main.c
@@ -342,22 +342,6 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
 	return 0;
 }
 
-static void
-fec_enet_submit_work(struct bufdesc *bdp, struct fec_enet_private *fep)
-{
-	const struct platform_device_id *id_entry =
-				platform_get_device_id(fep->pdev);
-	struct bufdesc *bdp_pre;
-
-	bdp_pre = fec_enet_get_prevdesc(bdp, fep);
-	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
-	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
-		fep->delay_work.trig_tx = true;
-		schedule_delayed_work(&(fep->delay_work.delay_work),
-					msecs_to_jiffies(1));
-	}
-}
-
 static int
 fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
 {
@@ -545,8 +529,6 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
 	bdp->cbd_sc = status;
 
-	fec_enet_submit_work(bdp, fep);
-
 	/* If this was the last BD in the ring, start at the beginning again. */
 	bdp = fec_enet_get_nextdesc(last_bdp, fep);
 
@@ -735,8 +717,6 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
 	/* Save skb pointer */
 	fep->tx_skbuff[index] = skb;
 
-	fec_enet_submit_work(bdp, fep);
-
 	skb_tx_timestamp(skb);
 	fep->cur_tx = bdp;
 
@@ -1065,11 +1045,6 @@ static void fec_enet_work(struct work_struct *work)
 		}
 		rtnl_unlock();
 	}
-
-	if (fep->delay_work.trig_tx) {
-		fep->delay_work.trig_tx = false;
-		writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-	}
 }
 
 static void
@@ -1166,7 +1141,10 @@ fec_enet_tx(struct net_device *ndev)
 			netif_wake_queue(ndev);
 		}
 	}
-	return;
+
+	/* ERR006538: Keep the transmitter going */
+	if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
+		writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 }
 
 /* During a receive, the cur_rx points to the current incoming buffer.