Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2016-07-20

This series contains updates to fm10k only.

Ngai-Mint provides a fix to clear PCIE_GMBX bits to ensure the proper
functioning of the mailbox global interrupt after a data path reset.

Jake provides most of the patches in the series, starting with an early
return from fm10k_down() if we are already down, to prevent conflicts
with other threads.  He fixes an issue where fm10k_update_stats() could
cause a NULL pointer dereference if it is called while we are going down
and the rings have been removed.  He cleans up and fixes the data path
reset flow, the Tx hang routine and stop_hw(), and re-works
fm10k_reinit() to be more maintainable, fixing several inconsistencies
in its work flow.  He implements fm10k_prepare_suspend() and
fm10k_handle_resume(), which wrap the new fm10k_prepare_for_reset() and
fm10k_handle_reset(); the new functions also handle stopping the service
task, something the original re-init flow does not need.  He fixes an
issue where an FLR knocks VF devices out of bus master mode and leaves
the driver unable to recover from the reset properly, by ensuring bus
master is re-enabled after every reset.  Finally, he fixes an issue
where resets occur seemingly without reason, regularly every few
minutes, until the switch manager software is loaded; this was caused by
continuously requesting the lport map, so the request is now made only
after we have verified that the switch mailbox is tx_ready.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2016-07-20 21:36:55 -07:00
commit f67fe5c80b
9 changed files with 233 additions and 167 deletions
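Several of the fixes in this series lean on the same guard: atomically
test-and-set a state bit (__FM10K_DOWN, __FM10K_UPDATING_STATS) and
return early if another thread already owns that state.  Below is a
minimal, stand-alone user-space C sketch of that pattern, not driver
code; the bit names and the demo_down() helper are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DOWN_BIT		0u
#define UPDATING_STATS_BIT	1u

static atomic_ulong state;

/* return true if the bit was already set, i.e. someone else owns it */
static bool test_and_set_state_bit(unsigned int bit)
{
	unsigned long mask = 1ul << bit;

	return atomic_fetch_or(&state, mask) & mask;
}

static void clear_state_bit(unsigned int bit)
{
	atomic_fetch_and(&state, ~(1ul << bit));
}

/* mirrors the early return added to fm10k_down(): if we are already
 * down, do not tear things down a second time
 */
static void demo_down(void)
{
	if (test_and_set_state_bit(DOWN_BIT))
		return;

	puts("bringing interface down");
	/* ... teardown would happen here ... */
}

int main(void)
{
	demo_down();		/* does the work */
	demo_down();		/* returns early: already down */
	clear_state_bit(DOWN_BIT);
	return 0;
}

In the driver itself the same role is played by test_and_set_bit() and
clear_bit() on interface->state, as the hunks below show.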


@ -362,6 +362,7 @@ enum fm10k_state_t {
__FM10K_SERVICE_DISABLE,
__FM10K_MBX_LOCK,
__FM10K_LINK_DOWN,
__FM10K_UPDATING_STATS,
};
static inline void fm10k_mbx_lock(struct fm10k_intfc *interface)
@ -457,6 +458,7 @@ __be16 fm10k_tx_encap_offload(struct sk_buff *skb);
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
struct fm10k_ring *tx_ring);
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
u64 fm10k_get_tx_pending(struct fm10k_ring *ring);
bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring);
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count);


@ -519,8 +519,12 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
goto out;
/* interface cannot receive traffic without logical ports */
if (mac->dglort_map == FM10K_DGLORTMAP_NONE)
if (mac->dglort_map == FM10K_DGLORTMAP_NONE) {
if (hw->mac.ops.request_lport_map)
ret_val = hw->mac.ops.request_lport_map(hw);
goto out;
}
/* if we passed all the tests above then the switch is ready and we no
* longer need to check for link


@ -76,6 +76,8 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
FM10K_STAT("reset_while_pending", hw.mac.reset_while_pending),
FM10K_STAT("tx_hang_count", tx_timeout_count),
};


@ -28,7 +28,7 @@
#include "fm10k.h"
#define DRV_VERSION "0.19.3-k"
#define DRV_VERSION "0.21.2-k"
#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
@ -1128,11 +1128,13 @@ static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
return ring->stats.packets;
}
static u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
{
/* use SW head and tail until we have real hardware */
u32 head = ring->next_to_clean;
u32 tail = ring->next_to_use;
struct fm10k_intfc *interface = ring->q_vector->interface;
struct fm10k_hw *hw = &interface->hw;
u32 head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
u32 tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
return ((head <= tail) ? tail : tail + ring->count) - head;
}
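The computation above is plain circular-buffer arithmetic: pending =
tail - head, with ring->count added when the tail index has wrapped past
the head.  A tiny stand-alone sketch with hypothetical values (not
driver code):

#include <assert.h>
#include <stdint.h>

/* pending descriptors between hardware head and software tail on a
 * circular ring of `count` entries
 */
static uint32_t ring_pending(uint32_t head, uint32_t tail, uint32_t count)
{
	return ((head <= tail) ? tail : tail + count) - head;
}

int main(void)
{
	assert(ring_pending(10, 50, 256) == 40);	/* no wrap */
	assert(ring_pending(250, 4, 256) == 10);	/* tail wrapped */
	assert(ring_pending(7, 7, 256) == 0);		/* ring empty */
	return 0;
}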
@ -1856,7 +1858,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
if (v_budget < 0) {
kfree(interface->msix_entries);
interface->msix_entries = NULL;
return -ENOMEM;
return v_budget;
}
/* record the number of queues available for q_vectors */


@ -41,6 +41,8 @@ struct fm10k_mbx_info;
#define FM10K_MBX_ACK_INTERRUPT 0x00000010
#define FM10K_MBX_INTERRUPT_ENABLE 0x00000020
#define FM10K_MBX_INTERRUPT_DISABLE 0x00000040
#define FM10K_MBX_GLOBAL_REQ_INTERRUPT 0x00000200
#define FM10K_MBX_GLOBAL_ACK_INTERRUPT 0x00000400
#define FM10K_MBICR(_n) ((_n) + 0x18840)
#define FM10K_GMBX 0x18842


@ -123,11 +123,24 @@ static void fm10k_service_timer(unsigned long data)
static void fm10k_detach_subtask(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
u32 __iomem *hw_addr;
u32 value;
/* do nothing if device is still present or hw_addr is set */
if (netif_device_present(netdev) || interface->hw.hw_addr)
return;
/* check the real address space to see if we've recovered */
hw_addr = READ_ONCE(interface->uc_addr);
value = readl(hw_addr);
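/* reads from a surprise-removed PCIe device return all ones, so any
 * other value here means the register space is accessible again
 */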
if ((~value)) {
interface->hw.hw_addr = interface->uc_addr;
netif_device_attach(netdev);
interface->flags |= FM10K_FLAG_RESET_REQUESTED;
netdev_warn(netdev, "PCIe link restored, device now attached\n");
return;
}
rtnl_lock();
if (netif_running(netdev))
@ -136,11 +149,9 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
rtnl_unlock();
}
static void fm10k_reinit(struct fm10k_intfc *interface)
static void fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
int err;
WARN_ON(in_interrupt());
@ -165,6 +176,19 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
/* delay any future reset requests */
interface->last_reset = jiffies + (10 * HZ);
rtnl_unlock();
}
static int fm10k_handle_reset(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
int err;
rtnl_lock();
pci_set_master(interface->pdev);
/* reset and initialize the hardware so it is in a known state */
err = hw->mac.ops.reset_hw(hw);
if (err) {
@ -185,7 +209,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
goto reinit_err;
}
/* reassociate interrupts */
/* re-associate interrupts */
err = fm10k_mbx_request_irq(interface);
if (err)
goto err_mbx_irq;
@ -219,7 +243,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
clear_bit(__FM10K_RESETTING, &interface->state);
return;
return err;
err_open:
fm10k_mbx_free_irq(interface);
err_mbx_irq:
@ -230,6 +254,20 @@ reinit_err:
rtnl_unlock();
clear_bit(__FM10K_RESETTING, &interface->state);
return err;
}
static void fm10k_reinit(struct fm10k_intfc *interface)
{
int err;
fm10k_prepare_for_reset(interface);
err = fm10k_handle_reset(interface);
if (err)
dev_err(&interface->pdev->dev,
"fm10k_handle_reset failed: %d\n", err);
}
static void fm10k_reset_subtask(struct fm10k_intfc *interface)
@ -372,12 +410,19 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
u64 bytes, pkts;
int i;
/* ensure only one thread updates stats at a time */
if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
return;
/* do not allow stats update via service task for next second */
interface->next_stats_update = jiffies + HZ;
/* gather some stats to the interface struct that are per queue */
for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
struct fm10k_ring *tx_ring = interface->tx_ring[i];
struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]);
if (!tx_ring)
continue;
restart_queue += tx_ring->tx_stats.restart_queue;
tx_busy += tx_ring->tx_stats.tx_busy;
@ -396,7 +441,10 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
/* gather some stats to the interface struct that are per queue */
for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
struct fm10k_ring *rx_ring = interface->rx_ring[i];
struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]);
if (!rx_ring)
continue;
bytes += rx_ring->stats.bytes;
pkts += rx_ring->stats.packets;
@ -443,6 +491,8 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
/* Fill out the OS statistics structure */
net_stats->rx_errors = rx_errors;
net_stats->rx_dropped = interface->stats.nodesc_drop.count;
clear_bit(__FM10K_UPDATING_STATS, &interface->state);
}
/**
@ -1566,6 +1616,9 @@ void fm10k_up(struct fm10k_intfc *interface)
/* configure interrupts */
hw->mac.ops.update_int_moderator(hw);
/* enable statistics capture again */
clear_bit(__FM10K_UPDATING_STATS, &interface->state);
/* clear down bit to indicate we are ready to go */
clear_bit(__FM10K_DOWN, &interface->state);
@ -1598,10 +1651,11 @@ void fm10k_down(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
int err;
int err, i = 0, count = 0;
/* signal that we are down to the interrupt handler and service task */
set_bit(__FM10K_DOWN, &interface->state);
if (test_and_set_bit(__FM10K_DOWN, &interface->state))
return;
/* call carrier off first to avoid false dev_watchdog timeouts */
netif_carrier_off(netdev);
@ -1613,18 +1667,57 @@ void fm10k_down(struct fm10k_intfc *interface)
/* reset Rx filters */
fm10k_reset_rx_state(interface);
/* allow 10ms for device to quiesce */
usleep_range(10000, 20000);
/* disable polling routines */
fm10k_napi_disable_all(interface);
/* capture stats one last time before stopping interface */
fm10k_update_stats(interface);
/* prevent updating statistics while we're down */
while (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
usleep_range(1000, 2000);
/* skip waiting for TX DMA if we lost PCIe link */
if (FM10K_REMOVED(hw->hw_addr))
goto skip_tx_dma_drain;
/* In some rare circumstances it can take a while for Tx queues to
* quiesce and be fully disabled. Attempt to .stop_hw() first, and
* then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop
* until the Tx queues have emptied, or until a number of retries. If
* we fail to clear within the retry loop, we will issue a warning
* indicating that Tx DMA is probably hung. Note this means we call
* .stop_hw() twice but this shouldn't cause any problems.
*/
err = hw->mac.ops.stop_hw(hw);
if (err != FM10K_ERR_REQUESTS_PENDING)
goto skip_tx_dma_drain;
#define TX_DMA_DRAIN_RETRIES 25
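/* with 10-20 ms of sleep per iteration this bounds the wait to roughly
 * 250-500 ms before we report a probable Tx DMA hang
 */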
for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) {
usleep_range(10000, 20000);
/* start checking at the last ring to have pending Tx */
for (; i < interface->num_tx_queues; i++)
if (fm10k_get_tx_pending(interface->tx_ring[i]))
break;
/* if all the queues are drained, we can break now */
if (i == interface->num_tx_queues)
break;
}
if (count >= TX_DMA_DRAIN_RETRIES)
dev_err(&interface->pdev->dev,
"Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
count);
skip_tx_dma_drain:
/* Disable DMA engine for Tx/Rx */
err = hw->mac.ops.stop_hw(hw);
if (err)
if (err == FM10K_ERR_REQUESTS_PENDING)
dev_err(&interface->pdev->dev,
"due to pending requests hw was not shut down gracefully\n");
else if (err)
dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);
/* free any buffers still on the rings */
@ -1750,6 +1843,7 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
/* Start off interface as being down */
set_bit(__FM10K_DOWN, &interface->state);
set_bit(__FM10K_UPDATING_STATS, &interface->state);
return 0;
}
@ -2033,6 +2127,48 @@ static void fm10k_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
{
/* the watchdog task reads from registers, which might appear like
* a surprise remove if the PCIe device is disabled while we're
* stopped. We stop the watchdog task until after we resume software
* activity.
*/
set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
cancel_work_sync(&interface->service_task);
fm10k_prepare_for_reset(interface);
}
static int fm10k_handle_resume(struct fm10k_intfc *interface)
{
struct fm10k_hw *hw = &interface->hw;
int err;
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
err = fm10k_handle_reset(interface);
if (err)
return err;
/* assume host is not ready, to prevent race with watchdog in case we
* actually don't have connection to the switch
*/
interface->host_ready = false;
fm10k_watchdog_host_not_ready(interface);
/* force link to stay down for a second to prevent link flutter */
interface->link_down_event = jiffies + (HZ);
set_bit(__FM10K_LINK_DOWN, &interface->state);
/* clear the service task disable bit to allow service task to start */
clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
fm10k_service_event_schedule(interface);
return err;
}
#ifdef CONFIG_PM
/**
* fm10k_resume - Restore device to pre-sleep state
@ -2069,60 +2205,13 @@ static int fm10k_resume(struct pci_dev *pdev)
/* refresh hw_addr in case it was dropped */
hw->hw_addr = interface->uc_addr;
/* reset hardware to known state */
err = hw->mac.ops.init_hw(&interface->hw);
if (err) {
dev_err(&pdev->dev, "init_hw failed: %d\n", err);
err = fm10k_handle_resume(interface);
if (err)
return err;
}
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
rtnl_lock();
err = fm10k_init_queueing_scheme(interface);
if (err)
goto err_queueing_scheme;
err = fm10k_mbx_request_irq(interface);
if (err)
goto err_mbx_irq;
err = fm10k_hw_ready(interface);
if (err)
goto err_open;
err = netif_running(netdev) ? fm10k_open(netdev) : 0;
if (err)
goto err_open;
rtnl_unlock();
/* assume host is not ready, to prevent race with watchdog in case we
* actually don't have connection to the switch
*/
interface->host_ready = false;
fm10k_watchdog_host_not_ready(interface);
/* clear the service task disable bit to allow service task to start */
clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
fm10k_service_event_schedule(interface);
/* restore SR-IOV interface */
fm10k_iov_resume(pdev);
netif_device_attach(netdev);
return 0;
err_open:
fm10k_mbx_free_irq(interface);
err_mbx_irq:
fm10k_clear_queueing_scheme(interface);
err_queueing_scheme:
rtnl_unlock();
return err;
}
/**
@ -2142,27 +2231,7 @@ static int fm10k_suspend(struct pci_dev *pdev,
netif_device_detach(netdev);
fm10k_iov_suspend(pdev);
/* the watchdog tasks may read registers, which will appear like a
* surprise-remove event once the PCI device is disabled. This will
* cause us to close the netdevice, so we don't retain the open/closed
* state post-resume. Prevent this by disabling the service task while
* suspended, until we actually resume.
*/
set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
cancel_work_sync(&interface->service_task);
rtnl_lock();
if (netif_running(netdev))
fm10k_close(netdev);
fm10k_mbx_free_irq(interface);
fm10k_clear_queueing_scheme(interface);
rtnl_unlock();
fm10k_prepare_suspend(interface);
err = pci_save_state(pdev);
if (err)
@ -2195,17 +2264,7 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
rtnl_lock();
if (netif_running(netdev))
fm10k_close(netdev);
fm10k_mbx_free_irq(interface);
/* free interrupts */
fm10k_clear_queueing_scheme(interface);
rtnl_unlock();
fm10k_prepare_suspend(interface);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@ -2219,7 +2278,6 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
{
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
pci_ers_result_t result;
if (pci_enable_device_mem(pdev)) {
@ -2237,12 +2295,6 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false);
/* refresh hw_addr in case it was dropped */
interface->hw.hw_addr = interface->uc_addr;
interface->flags |= FM10K_FLAG_RESET_REQUESTED;
fm10k_service_event_schedule(interface);
result = PCI_ERS_RESULT_RECOVERED;
}
@ -2262,50 +2314,54 @@ static void fm10k_io_resume(struct pci_dev *pdev)
{
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
int err;
err = fm10k_handle_resume(interface);
if (err)
dev_warn(&pdev->dev,
"fm10k_io_resume failed: %d\n", err);
else
netif_device_attach(netdev);
}
/**
* fm10k_io_reset_notify - called when PCI function is reset
* @pdev: Pointer to PCI device
*
* This callback is called when the PCI function is reset such as from
* /sys/class/net/<enpX>/device/reset or similar. When prepare is true, it
* means we should prepare for a function reset. If prepare is false, it means
* the function reset just occurred.
*/
static void fm10k_io_reset_notify(struct pci_dev *pdev, bool prepare)
{
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
int err = 0;
/* reset hardware to known state */
err = hw->mac.ops.init_hw(&interface->hw);
if (err) {
dev_err(&pdev->dev, "init_hw failed: %d\n", err);
return;
if (prepare) {
/* warn in case we have any active VF devices */
if (pci_num_vf(pdev))
dev_warn(&pdev->dev,
"PCIe FLR may cause issues for any active VF devices\n");
fm10k_prepare_suspend(interface);
} else {
err = fm10k_handle_resume(interface);
}
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
rtnl_lock();
err = fm10k_init_queueing_scheme(interface);
if (err) {
dev_err(&interface->pdev->dev,
"init_queueing_scheme failed: %d\n", err);
goto unlock;
dev_warn(&pdev->dev,
"fm10k_io_reset_notify failed: %d\n", err);
netif_device_detach(interface->netdev);
}
/* reassociate interrupts */
fm10k_mbx_request_irq(interface);
rtnl_lock();
if (netif_running(netdev))
err = fm10k_open(netdev);
rtnl_unlock();
/* final check of hardware state before registering the interface */
err = err ? : fm10k_hw_ready(interface);
if (!err)
netif_device_attach(netdev);
unlock:
rtnl_unlock();
}
static const struct pci_error_handlers fm10k_err_handler = {
.error_detected = fm10k_io_error_detected,
.slot_reset = fm10k_io_slot_reset,
.resume = fm10k_io_resume,
.reset_notify = fm10k_io_reset_notify,
};
static struct pci_driver fm10k_driver = {


@ -51,34 +51,37 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
/* shut down all rings */
err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
if (err)
if (err == FM10K_ERR_REQUESTS_PENDING) {
hw->mac.reset_while_pending++;
goto force_reset;
} else if (err) {
return err;
}
/* Verify that DMA is no longer active */
reg = fm10k_read_reg(hw, FM10K_DMA_CTRL);
if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
return FM10K_ERR_DMA_PENDING;
/* verify the switch is ready for reset */
reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY))
goto out;
force_reset:
/* Initiate data path reset */
reg |= FM10K_DMA_CTRL_DATAPATH_RESET;
reg = FM10K_DMA_CTRL_DATAPATH_RESET;
fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);
/* Flush write and allow 100us for reset to complete */
fm10k_write_flush(hw);
udelay(FM10K_RESET_TIMEOUT);
/* Reset mailbox global interrupts */
reg = FM10K_MBX_GLOBAL_REQ_INTERRUPT | FM10K_MBX_GLOBAL_ACK_INTERRUPT;
fm10k_write_reg(hw, FM10K_GMBX, reg);
/* Verify we made it out of reset */
reg = fm10k_read_reg(hw, FM10K_IP);
if (!(reg & FM10K_IP_NOTINRESET))
err = FM10K_ERR_RESET_FAILED;
return FM10K_ERR_RESET_FAILED;
out:
return err;
return 0;
}
/**
@ -1619,25 +1622,15 @@ static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
**/
static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
{
s32 ret_val = 0;
u32 dma_ctrl2;
/* verify the switch is ready for interaction */
dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
goto out;
return 0;
/* retrieve generic host state info */
ret_val = fm10k_get_host_state_generic(hw, switch_ready);
if (ret_val)
goto out;
/* interface cannot receive traffic without logical ports */
if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
ret_val = fm10k_request_lport_map_pf(hw);
out:
return ret_val;
return fm10k_get_host_state_generic(hw, switch_ready);
}
/* This structure defines the attributes to be parsed below */
@ -1813,6 +1806,7 @@ static const struct fm10k_mac_ops mac_ops_pf = {
.set_dma_mask = fm10k_set_dma_mask_pf,
.get_fault = fm10k_get_fault_pf,
.get_host_state = fm10k_get_host_state_pf,
.request_lport_map = fm10k_request_lport_map_pf,
};
static const struct fm10k_iov_ops iov_ops_pf = {


@ -526,6 +526,7 @@ struct fm10k_mac_ops {
s32 (*stop_hw)(struct fm10k_hw *);
s32 (*get_bus_info)(struct fm10k_hw *);
s32 (*get_host_state)(struct fm10k_hw *, bool *);
s32 (*request_lport_map)(struct fm10k_hw *);
s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool);
s32 (*read_mac_addr)(struct fm10k_hw *);
s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *,
@ -562,6 +563,7 @@ struct fm10k_mac_info {
bool tx_ready;
u32 dglort_map;
u8 itr_scale;
u64 reset_while_pending;
};
struct fm10k_swapi_table_info {


@ -34,7 +34,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
/* we need to disable the queues before taking further steps */
err = fm10k_stop_hw_generic(hw);
if (err)
if (err && err != FM10K_ERR_REQUESTS_PENDING)
return err;
/* If permanent address is set then we need to restore it */
@ -67,7 +67,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
fm10k_write_reg(hw, FM10K_TDLEN(i), tdlen);
}
return 0;
return err;
}
/**
@ -83,7 +83,9 @@ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
/* shut down queues we own and reset DMA configuration */
err = fm10k_stop_hw_vf(hw);
if (err)
if (err == FM10K_ERR_REQUESTS_PENDING)
hw->mac.reset_while_pending++;
else if (err)
return err;
/* Initiate VF reset */
@ -96,9 +98,9 @@ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
/* Clear reset bit and verify it was cleared */
fm10k_write_reg(hw, FM10K_VFCTRL, 0);
if (fm10k_read_reg(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST)
err = FM10K_ERR_RESET_FAILED;
return FM10K_ERR_RESET_FAILED;
return err;
return 0;
}
/**