net: mana: Fix MANA VF unload when hardware is unresponsive
When unloading the MANA driver, mana_dealloc_queues() waits for the MANA
hardware to complete any inflight packets and set the pending send count
to zero. But if the hardware has failed, mana_dealloc_queues()
could wait forever.
Fix this by adding a timeout to the wait. Set the timeout to 120 seconds,
which is a somewhat arbitrary value that is more than long enough for
functional hardware to complete any sends.
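For illustration only, here is a minimal userspace sketch of the bounded-wait-with-backoff
pattern the patch applies (the helper name and the pending counter are hypothetical, not the
driver's code; the in-kernel version uses usleep_range(), jiffies and time_before()):

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <time.h>
  #include <unistd.h>

  /* Poll a pending-work counter, sleeping with exponential backoff, and
   * give up once an overall deadline has passed (120 s in the patch).
   * Returns true if the counter drained, false if the deadline expired.
   */
  static bool drain_with_timeout(atomic_int *pending, int timeout_sec)
  {
          struct timespec start, now;
          unsigned int sleep_us = 1000;   /* start at 1 ms, as the patch does */

          clock_gettime(CLOCK_MONOTONIC, &start);

          while (atomic_load(pending) > 0) {
                  clock_gettime(CLOCK_MONOTONIC, &now);
                  if (now.tv_sec - start.tv_sec >= timeout_sec)
                          return false;   /* hardware presumed unresponsive */

                  usleep(sleep_us);
                  if (sleep_us < 512000)  /* cap below 1 s; usleep() wants < 1,000,000 us */
                          sleep_us <<= 1; /* back off: 1 ms, 2 ms, 4 ms, ... */
          }

          return true;                    /* all in-flight work completed */
  }

In the driver itself, a queue that still has pending sends after the deadline triggers a PCIe
function-level reset (pcie_flr()) as a last resort, as the diff below shows.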
Cc: stable@vger.kernel.org
Fixes: ca9c54d2d6 ("net: mana: Add a driver for Microsoft Azure Network Adapter (MANA)")
Signed-off-by: Souradeep Chakrabarti <schakrabarti@linux.microsoft.com>
Link: https://lore.kernel.org/r/1691576525-24271-1-git-send-email-schakrabarti@linux.microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit a7dfeda6fd (parent 048c796beb)
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -8,6 +8,7 @@
 #include <linux/ethtool.h>
 #include <linux/filter.h>
 #include <linux/mm.h>
+#include <linux/pci.h>
 
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
@@ -2345,9 +2346,12 @@ int mana_attach(struct net_device *ndev)
 static int mana_dealloc_queues(struct net_device *ndev)
 {
 	struct mana_port_context *apc = netdev_priv(ndev);
+	unsigned long timeout = jiffies + 120 * HZ;
 	struct gdma_dev *gd = apc->ac->gdma_dev;
 	struct mana_txq *txq;
+	struct sk_buff *skb;
 	int i, err;
+	u32 tsleep;
 
 	if (apc->port_is_up)
 		return -EINVAL;
@@ -2363,15 +2367,40 @@ static int mana_dealloc_queues(struct net_device *ndev)
 	 * to false, but it doesn't matter since mana_start_xmit() drops any
 	 * new packets due to apc->port_is_up being false.
 	 *
-	 * Drain all the in-flight TX packets
+	 * Drain all the in-flight TX packets.
+	 * A timeout of 120 seconds for all the queues is used.
+	 * This will break the while loop when h/w is not responding.
+	 * This value of 120 has been decided here considering max
+	 * number of queues.
 	 */
 
 	for (i = 0; i < apc->num_queues; i++) {
 		txq = &apc->tx_qp[i].txq;
-		while (atomic_read(&txq->pending_sends) > 0)
-			usleep_range(1000, 2000);
+		tsleep = 1000;
+		while (atomic_read(&txq->pending_sends) > 0 &&
+		       time_before(jiffies, timeout)) {
+			usleep_range(tsleep, tsleep + 1000);
+			tsleep <<= 1;
+		}
+		if (atomic_read(&txq->pending_sends)) {
+			err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
+			if (err) {
+				netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
+					   err, atomic_read(&txq->pending_sends),
+					   txq->gdma_txq_id);
+			}
+			break;
+		}
 	}
 
+	for (i = 0; i < apc->num_queues; i++) {
+		txq = &apc->tx_qp[i].txq;
+		while ((skb = skb_dequeue(&txq->pending_skbs))) {
+			mana_unmap_skb(skb, apc);
+			dev_kfree_skb_any(skb);
+		}
+		atomic_set(&txq->pending_sends, 0);
+	}
 	/* We're 100% sure the queues can no longer be woken up, because
 	 * we're sure now mana_poll_tx_cq() can't be running.
 	 */