Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next

Ben Hutchings says:

====================
1. Change the TX path to stop queues earlier and avoid returning
NETDEV_TX_BUSY.
2. Remove some inefficiencies in soft-TSO.
3. Fix various bugs involving device state transitions and/or reset
scheduling by error handlers.
4. Take advantage of my previous change to operstate initialisation.
5. Miscellaneous cleanup.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller, 2012-08-24 16:35:43 -04:00
commit 255e87657a

6 changed files with 412 additions and 509 deletions


@@ -202,11 +202,21 @@ static void efx_stop_all(struct efx_nic *efx);
 #define EFX_ASSERT_RESET_SERIALISED(efx)		\
 	do {						\
-		if ((efx->state == STATE_RUNNING) ||	\
+		if ((efx->state == STATE_READY) ||	\
 		    (efx->state == STATE_DISABLED))	\
 			ASSERT_RTNL();			\
 	} while (0)
 
+static int efx_check_disabled(struct efx_nic *efx)
+{
+	if (efx->state == STATE_DISABLED) {
+		netif_err(efx, drv, efx->net_dev,
+			  "device is disabled due to earlier errors\n");
+		return -EIO;
+	}
+	return 0;
+}
+
 /**************************************************************************
  *
  * Event queue processing
@@ -630,6 +640,16 @@ static void efx_start_datapath(struct efx_nic *efx)
 	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
 					 sizeof(struct efx_rx_page_state));
 
+	/* We must keep at least one descriptor in a TX ring empty.
+	 * We could avoid this when the queue size does not exactly
+	 * match the hardware ring size, but it's not that important.
+	 * Therefore we stop the queue when one more skb might fill
+	 * the ring completely.  We wake it when half way back to
+	 * empty.
+	 */
+	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
 	/* Initialise the channels */
 	efx_for_each_channel(channel, efx) {
 		efx_for_each_channel_tx_queue(tx_queue, channel)
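
These two thresholds are what lets the TX path stop queues before they fill rather than return NETDEV_TX_BUSY (item 1 of the cover letter). A minimal sketch of the xmit-side check they enable, assuming an efx back-pointer in struct efx_tx_queue; efx_tx_maybe_stop_queue() is an illustrative name, not part of this hunk:

	/* Sketch only; uses the efx_tx_queue fields shown elsewhere in
	 * this diff (insert_count, old_read_count, core_txq). */
	static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq)
	{
		struct efx_nic *efx = txq->efx;
		unsigned int fill_level;

		/* Unsigned subtraction gives the right answer across wrap */
		fill_level = txq->insert_count - txq->old_read_count;
		if (likely(fill_level < efx->txq_stop_thresh))
			return;

		/* One more worst-case skb might not fit: stop the core
		 * queue now rather than fail the next hard_start_xmit.
		 * The completion path wakes it at txq_wake_thresh.
		 */
		netif_tx_stop_queue(txq->core_txq);
	}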
@@ -730,7 +750,11 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
 	u32 old_rxq_entries, old_txq_entries;
 	unsigned i, next_buffer_table = 0;
-	int rc = 0;
+	int rc;
+
+	rc = efx_check_disabled(efx);
+	if (rc)
+		return rc;
 
 	/* Not all channels should be reallocated.  We must avoid
 	 * reallocating their buffer table entries.
@@ -1365,6 +1389,8 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
 {
 	struct efx_channel *channel;
 
+	BUG_ON(efx->state == STATE_DISABLED);
+
 	if (efx->legacy_irq)
 		efx->legacy_irq_enabled = true;
 	efx_nic_enable_interrupts(efx);
@@ -1382,6 +1408,9 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
 {
 	struct efx_channel *channel;
 
+	if (efx->state == STATE_DISABLED)
+		return;
+
 	efx_mcdi_mode_poll(efx);
 
 	efx_nic_disable_interrupts(efx);
@@ -1533,22 +1562,21 @@ static int efx_probe_all(struct efx_nic *efx)
 	return rc;
 }
 
-/* Called after previous invocation(s) of efx_stop_all, restarts the port,
- * kernel transmit queues and NAPI processing, and ensures that the port is
- * scheduled to be reconfigured. This function is safe to call multiple
- * times when the NIC is in any state.
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured.  Interrupts must already be enabled.  This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
  */
 static void efx_start_all(struct efx_nic *efx)
 {
 	EFX_ASSERT_RESET_SERIALISED(efx);
+	BUG_ON(efx->state == STATE_DISABLED);
 
 	/* Check that it is appropriate to restart the interface. All
 	 * of these flags are safe to read under just the rtnl lock */
-	if (efx->port_enabled)
-		return;
-	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
-		return;
-	if (!netif_running(efx->net_dev))
+	if (efx->port_enabled || !netif_running(efx->net_dev))
 		return;
 
 	efx_start_port(efx);
@@ -1582,11 +1610,11 @@ static void efx_flush_all(struct efx_nic *efx)
 	cancel_work_sync(&efx->mac_work);
 }
 
-/* Quiesce hardware and software without bringing the link down.
- * Safe to call multiple times, when the nic and interface is in any
- * state. The caller is guaranteed to subsequently be in a position
- * to modify any hardware and software state they see fit without
- * taking locks. */
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down.  Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled.  Requires the RTNL lock.
+ */
 static void efx_stop_all(struct efx_nic *efx)
 {
 	EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1739,8 +1767,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct mii_ioctl_data *data = if_mii(ifr);
 
-	EFX_ASSERT_RESET_SERIALISED(efx);
-
 	/* Convert phy_id from older PRTAD/DEVAD format */
 	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
 	    (data->phy_id & 0xfc00) == 0x0400)
@@ -1820,13 +1846,14 @@ static void efx_netpoll(struct net_device *net_dev)
 static int efx_net_open(struct net_device *net_dev)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	EFX_ASSERT_RESET_SERIALISED(efx);
+	int rc;
 
 	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
 		  raw_smp_processor_id());
 
-	if (efx->state == STATE_DISABLED)
-		return -EIO;
+	rc = efx_check_disabled(efx);
+	if (rc)
+		return rc;
 	if (efx->phy_mode & PHY_MODE_SPECIAL)
 		return -EBUSY;
 	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
@@ -1852,10 +1879,8 @@ static int efx_net_stop(struct net_device *net_dev)
 	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
 		  raw_smp_processor_id());
 
-	if (efx->state != STATE_DISABLED) {
-		/* Stop the device and flush all the channels */
-		efx_stop_all(efx);
-	}
+	/* Stop the device and flush all the channels */
+	efx_stop_all(efx);
 
 	return 0;
 }
@@ -1915,9 +1940,11 @@ static void efx_watchdog(struct net_device *net_dev)
 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
+	int rc;
 
-	EFX_ASSERT_RESET_SERIALISED(efx);
+	rc = efx_check_disabled(efx);
+	if (rc)
+		return rc;
 
 	if (new_mtu > EFX_MAX_MTU)
 		return -EINVAL;
@@ -1926,8 +1953,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
 	mutex_lock(&efx->mac_lock);
-	/* Reconfigure the MAC before enabling the dma queues so that
-	 * the RX buffers don't overflow */
 	net_dev->mtu = new_mtu;
 	efx->type->reconfigure_mac(efx);
 	mutex_unlock(&efx->mac_lock);
@@ -1942,8 +1967,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
 	struct sockaddr *addr = data;
 	char *new_addr = addr->sa_data;
 
-	EFX_ASSERT_RESET_SERIALISED(efx);
-
 	if (!is_valid_ether_addr(new_addr)) {
 		netif_err(efx, drv, efx->net_dev,
 			  "invalid ethernet MAC address requested: %pM\n",
@@ -2079,11 +2102,27 @@ static int efx_register_netdev(struct efx_nic *efx)
 
 	rtnl_lock();
 
+	/* Enable resets to be scheduled and check whether any were
+	 * already requested.  If so, the NIC is probably hosed so we
+	 * abort.
+	 */
+	efx->state = STATE_READY;
+	smp_mb(); /* ensure we change state before checking reset_pending */
+	if (efx->reset_pending) {
+		netif_err(efx, probe, efx->net_dev,
+			  "aborting probe due to scheduled reset\n");
+		rc = -EIO;
+		goto fail_locked;
+	}
+
 	rc = dev_alloc_name(net_dev, net_dev->name);
 	if (rc < 0)
 		goto fail_locked;
 	efx_update_name(efx);
 
+	/* Always start with carrier off; PHY events will detect the link */
+	netif_carrier_off(net_dev);
+
 	rc = register_netdevice(net_dev);
 	if (rc)
 		goto fail_locked;
@@ -2094,9 +2133,6 @@ static int efx_register_netdev(struct efx_nic *efx)
 		efx_init_tx_queue_core_txq(tx_queue);
 	}
 
-	/* Always start with carrier off; PHY events will detect the link */
-	netif_carrier_off(net_dev);
-
 	rtnl_unlock();
 
 	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2108,14 +2144,14 @@ static int efx_register_netdev(struct efx_nic *efx)
 	return 0;
 
+fail_registered:
+	rtnl_lock();
+	unregister_netdevice(net_dev);
 fail_locked:
+	efx->state = STATE_UNINIT;
 	rtnl_unlock();
 	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
 	return rc;
-
-fail_registered:
-	unregister_netdev(net_dev);
-	return rc;
 }
 
 static void efx_unregister_netdev(struct efx_nic *efx)
@@ -2138,7 +2174,11 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 	strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
 	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-	unregister_netdev(efx->net_dev);
+
+	rtnl_lock();
+	unregister_netdevice(efx->net_dev);
+	efx->state = STATE_UNINIT;
+	rtnl_unlock();
 }
 
 /**************************************************************************
@@ -2154,9 +2194,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
 	efx_stop_all(efx);
-	mutex_lock(&efx->mac_lock);
 	efx_stop_interrupts(efx, false);
+
+	mutex_lock(&efx->mac_lock);
 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
 		efx->phy_op->fini(efx);
 	efx->type->fini(efx);
@@ -2276,16 +2316,15 @@ static void efx_reset_work(struct work_struct *data)
 	if (!pending)
 		return;
 
-	/* If we're not RUNNING then don't reset. Leave the reset_pending
-	 * flags set so that efx_pci_probe_main will be retried */
-	if (efx->state != STATE_RUNNING) {
-		netif_info(efx, drv, efx->net_dev,
-			   "scheduled reset quenched. NIC not RUNNING\n");
-		return;
-	}
-
 	rtnl_lock();
-	(void)efx_reset(efx, fls(pending) - 1);
+
+	/* We checked the state in efx_schedule_reset() but it may
+	 * have changed by now.  Now that we have the RTNL lock,
+	 * it cannot change again.
+	 */
+	if (efx->state == STATE_READY)
+		(void)efx_reset(efx, fls(pending) - 1);
+
 	rtnl_unlock();
 }
@@ -2311,6 +2350,13 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 	}
 
 	set_bit(method, &efx->reset_pending);
+	smp_mb(); /* ensure we change reset_pending before checking state */
+
+	/* If we're not READY then just leave the flags set as the cue
+	 * to abort probing or reschedule the reset later.
+	 */
+	if (ACCESS_ONCE(efx->state) != STATE_READY)
+		return;
 
 	/* efx_process_channel() will no longer read events once a
 	 * reset is scheduled. So switch back to poll'd MCDI completions. */
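
This barrier pairs with the smp_mb() added in efx_register_netdev() above: whichever of the two paths runs second is guaranteed to observe the other's write, so a reset requested while probing either gets queued normally or aborts the probe. Sketched with names from this diff:

	/*   efx_register_netdev()       |  efx_schedule_reset()
	 *   --------------------------  |  ------------------------------------
	 *   efx->state = STATE_READY;   |  set_bit(method, &efx->reset_pending);
	 *   smp_mb();                   |  smp_mb();
	 *   if (efx->reset_pending)     |  if (ACCESS_ONCE(efx->state) != STATE_READY)
	 *           abort probe (-EIO); |          return;  -- probe will see the bit
	 */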
@@ -2376,13 +2422,12 @@ static const struct efx_phy_operations efx_dummy_phy_operations = {
 /* This zeroes out and then fills in the invariants in a struct
  * efx_nic (including all sub-structures).
  */
-static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
+static int efx_init_struct(struct efx_nic *efx,
 			   struct pci_dev *pci_dev, struct net_device *net_dev)
 {
 	int i;
 
 	/* Initialise common structures */
-	memset(efx, 0, sizeof(*efx));
 	spin_lock_init(&efx->biu_lock);
 #ifdef CONFIG_SFC_MTD
 	INIT_LIST_HEAD(&efx->mtd_list);
@@ -2392,7 +2437,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
 	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
 	efx->pci_dev = pci_dev;
 	efx->msg_enable = debug;
-	efx->state = STATE_INIT;
+	efx->state = STATE_UNINIT;
 	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
 
 	efx->net_dev = net_dev;
@@ -2409,8 +2454,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
 		goto fail;
 	}
 
-	efx->type = type;
-
 	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 
 	/* Higher numbered interrupt modes are less capable! */
@@ -2455,6 +2498,12 @@ static void efx_fini_struct(struct efx_nic *efx)
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
+	/* Flush reset_work.  It can no longer be scheduled since we
+	 * are not READY.
+	 */
+	BUG_ON(efx->state == STATE_READY);
+	cancel_work_sync(&efx->reset_work);
+
 #ifdef CONFIG_RFS_ACCEL
 	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
 	efx->net_dev->rx_cpu_rmap = NULL;
@@ -2480,24 +2529,15 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
 	/* Mark the NIC as fini, then stop the interface */
 	rtnl_lock();
-	efx->state = STATE_FINI;
 	dev_close(efx->net_dev);
-
-	/* Allow any queued efx_resets() to complete */
+	efx_stop_interrupts(efx, false);
 	rtnl_unlock();
 
-	efx_stop_interrupts(efx, false);
 	efx_sriov_fini(efx);
 	efx_unregister_netdev(efx);
 
 	efx_mtd_remove(efx);
 
-	/* Wait for any scheduled resets to complete. No more will be
-	 * scheduled from this point because efx_stop_all() has been
-	 * called, we are no longer registered with driverlink, and
-	 * the net_device's have been removed. */
-	cancel_work_sync(&efx->reset_work);
-
 	efx_pci_remove_main(efx);
 
 	efx_fini_io(efx);
@@ -2617,7 +2657,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 				   const struct pci_device_id *entry)
 {
-	const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
 	struct net_device *net_dev;
 	struct efx_nic *efx;
 	int rc;
@@ -2627,10 +2666,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 				     EFX_MAX_RX_QUEUES);
 	if (!net_dev)
 		return -ENOMEM;
-	net_dev->features |= (type->offload_features | NETIF_F_SG |
+	efx = netdev_priv(net_dev);
+	efx->type = (const struct efx_nic_type *) entry->driver_data;
+	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
 			      NETIF_F_HIGHDMA | NETIF_F_TSO |
 			      NETIF_F_RXCSUM);
-	if (type->offload_features & NETIF_F_V6_CSUM)
+	if (efx->type->offload_features & NETIF_F_V6_CSUM)
 		net_dev->features |= NETIF_F_TSO6;
 	/* Mask for features that also apply to VLAN devices */
 	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
@@ -2638,10 +2679,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 				   NETIF_F_RXCSUM);
 	/* All offloads can be toggled */
 	net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
-	efx = netdev_priv(net_dev);
 	pci_set_drvdata(pci_dev, efx);
 	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
-	rc = efx_init_struct(efx, type, pci_dev, net_dev);
+	rc = efx_init_struct(efx, pci_dev, net_dev);
 	if (rc)
 		goto fail1;
@@ -2656,28 +2696,9 @@
 		goto fail2;
 
 	rc = efx_pci_probe_main(efx);
-
-	/* Serialise against efx_reset(). No more resets will be
-	 * scheduled since efx_stop_all() has been called, and we have
-	 * not and never have been registered.
-	 */
-	cancel_work_sync(&efx->reset_work);
-
 	if (rc)
 		goto fail3;
 
-	/* If there was a scheduled reset during probe, the NIC is
-	 * probably hosed anyway.
-	 */
-	if (efx->reset_pending) {
-		rc = -EIO;
-		goto fail4;
-	}
-
-	/* Switch to the running state before we expose the device to the OS,
-	 * so that dev_open()|efx_start_all() will actually start the device */
-	efx->state = STATE_RUNNING;
-
 	rc = efx_register_netdev(efx);
 	if (rc)
 		goto fail4;
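
With the checks above, probe no longer has to serialise against the reset worker itself: efx_schedule_reset() queues no work unless the state is READY, efx_register_netdev() aborts if a reset was requested while it was flipping the state, and efx_pci_remove_main() flushes whatever did get queued. In outline:

	/* Reset-work lifecycle after this series (all names from this diff):
	 *
	 *   efx_schedule_reset():  set_bit(method, &efx->reset_pending); smp_mb();
	 *                          if (state != STATE_READY) return;  -- nothing queued
	 *   efx_register_netdev(): state = STATE_READY; smp_mb();
	 *                          if (efx->reset_pending) abort probe with -EIO
	 *   efx_pci_remove_main(): BUG_ON(state == STATE_READY);
	 *                          cancel_work_sync(&efx->reset_work);
	 */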
@@ -2717,12 +2738,18 @@ static int efx_pm_freeze(struct device *dev)
 {
 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 
-	efx->state = STATE_FINI;
+	rtnl_lock();
 
-	netif_device_detach(efx->net_dev);
+	if (efx->state != STATE_DISABLED) {
+		efx->state = STATE_UNINIT;
 
-	efx_stop_all(efx);
-	efx_stop_interrupts(efx, false);
+		netif_device_detach(efx->net_dev);
+
+		efx_stop_all(efx);
+		efx_stop_interrupts(efx, false);
+	}
+
+	rtnl_unlock();
 
 	return 0;
 }
@@ -2731,8 +2758,9 @@ static int efx_pm_thaw(struct device *dev)
 {
 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 
-	efx->state = STATE_INIT;
+	rtnl_lock();
 
-	efx_start_interrupts(efx, false);
+	if (efx->state != STATE_DISABLED) {
+		efx_start_interrupts(efx, false);
 
-	mutex_lock(&efx->mac_lock);
+		mutex_lock(&efx->mac_lock);
@@ -2743,9 +2771,12 @@
 	netif_device_attach(efx->net_dev);
 
-	efx->state = STATE_RUNNING;
+		efx->state = STATE_READY;
 
 	efx->type->resume_wol(efx);
+	}
+
+	rtnl_unlock();
 
 	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
 	queue_work(reset_workqueue, &efx->reset_work);


@@ -529,9 +529,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
 	if (!efx_tests)
 		goto fail;
 
-	ASSERT_RTNL();
-
-	if (efx->state != STATE_RUNNING) {
+	if (efx->state != STATE_READY) {
 		rc = -EIO;
 		goto fail1;
 	}


@@ -380,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
 		new_mode = PHY_MODE_SPECIAL;
 	if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
 		err = 0;
-	} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
+	} else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
 		err = -EBUSY;
 	} else {
 		/* Reset the PHY, reconfigure the MAC and enable/disable


@@ -91,29 +91,31 @@ struct efx_special_buffer {
 };
 
 /**
- * struct efx_tx_buffer - An Efx TX buffer
- * @skb: The associated socket buffer.
- *	Set only on the final fragment of a packet; %NULL for all other
- *	fragments.  When this fragment completes, then we can free this
- *	skb.
- * @tsoh: The associated TSO header structure, or %NULL if this
- *	buffer is not a TSO header.
+ * struct efx_tx_buffer - buffer state for a TX descriptor
+ * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
+ *	freed when descriptor completes
+ * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
+ *	freed when descriptor completes.
  * @dma_addr: DMA address of the fragment.
+ * @flags: Flags for allocation and DMA mapping type
 * @len: Length of this fragment.
 *	This field is zero when the queue slot is empty.
- * @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if dma_unmap_single should be used.
 * @unmap_len: Length of this fragment to unmap
 */
 struct efx_tx_buffer {
-	const struct sk_buff *skb;
-	struct efx_tso_header *tsoh;
+	union {
+		const struct sk_buff *skb;
+		void *heap_buf;
+	};
 	dma_addr_t dma_addr;
+	unsigned short flags;
 	unsigned short len;
-	bool continuation;
-	bool unmap_single;
 	unsigned short unmap_len;
 };
 
+#define EFX_TX_BUF_CONT		1	/* not last descriptor of packet */
+#define EFX_TX_BUF_SKB		2	/* buffer is last part of skb */
+#define EFX_TX_BUF_HEAP		4	/* buffer was allocated with kmalloc() */
+#define EFX_TX_BUF_MAP_SINGLE	8	/* buffer was mapped with dma_map_single() */
+
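Folding the skb/tsoh pointers into a union and the two bools into @flags lets a single routine release any buffer type. A sketch of such a completion helper, assuming an efx back-pointer in struct efx_tx_queue; efx_dequeue_buffer() is an illustrative name, and the real driver would also have to rebase dma_addr before unmapping:

	/* Sketch only -- not taken from this diff */
	static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
				       struct efx_tx_buffer *buffer)
	{
		if (buffer->unmap_len) {
			struct device *dev = &tx_queue->efx->pci_dev->dev;

			if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
				dma_unmap_single(dev, buffer->dma_addr,
						 buffer->unmap_len, DMA_TO_DEVICE);
			else
				dma_unmap_page(dev, buffer->dma_addr,
					       buffer->unmap_len, DMA_TO_DEVICE);
			buffer->unmap_len = 0;
		}

		if (buffer->flags & EFX_TX_BUF_SKB)
			dev_kfree_skb_any((struct sk_buff *)buffer->skb);
		else if (buffer->flags & EFX_TX_BUF_HEAP)
			kfree(buffer->heap_buf);

		buffer->len = 0;
		buffer->flags = 0;
	}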
 /**
  * struct efx_tx_queue - An Efx TX queue
@@ -133,6 +135,7 @@ struct efx_tx_buffer {
  * @channel: The associated channel
  * @core_txq: The networking core TX queue structure
  * @buffer: The software buffer ring
+ * @tsoh_page: Array of pages of TSO header buffers
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
  * @initialised: Has hardware queue been initialised?
@@ -156,9 +159,6 @@ struct efx_tx_buffer {
 *	variable indicates that the queue is full.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
- * @tso_headers_free: A list of TSO headers allocated for this TX queue
- *	that are not in use, and so available for new TSO sends. The list
- *	is protected by the TX queue lock.
 * @tso_bursts: Number of times TSO xmit invoked by kernel
 * @tso_long_headers: Number of packets with headers too long for standard
 *	blocks
@@ -175,6 +175,7 @@ struct efx_tx_queue {
 	struct efx_channel *channel;
 	struct netdev_queue *core_txq;
 	struct efx_tx_buffer *buffer;
+	struct efx_buffer *tsoh_page;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
 	bool initialised;
@@ -187,7 +188,6 @@ struct efx_tx_queue {
 	unsigned int insert_count ____cacheline_aligned_in_smp;
 	unsigned int write_count;
 	unsigned int old_read_count;
-	struct efx_tso_header *tso_headers_free;
 	unsigned int tso_bursts;
 	unsigned int tso_long_headers;
 	unsigned int tso_packets;
@@ -430,11 +430,9 @@ enum efx_int_mode {
 #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
 
 enum nic_state {
-	STATE_INIT = 0,
-	STATE_RUNNING = 1,
-	STATE_FINI = 2,
-	STATE_DISABLED = 3,
-	STATE_MAX,
+	STATE_UNINIT = 0,	/* device being probed/removed or is frozen */
+	STATE_READY = 1,	/* hardware ready and netdev registered */
+	STATE_DISABLED = 2,	/* device disabled due to hardware errors */
 };
 
 /*
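The three remaining states form a small machine; roughly, per the hunks above (DISABLED appears to be left only by unloading the driver):

	/*   STATE_UNINIT --- efx_register_netdev() -----------------> STATE_READY
	 *   STATE_READY  --- efx_unregister_netdev()/efx_pm_freeze() --> STATE_UNINIT
	 *   any state    --- fatal hardware error --------------------> STATE_DISABLED
	 */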
@@ -654,7 +652,7 @@ struct vfdi_status;
 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
 * @irq_rx_moderation: IRQ moderation time for RX event queues
 * @msg_enable: Log message enable flags
- * @state: Device state flag. Serialised by the rtnl_lock.
+ * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
 * @reset_pending: Bitmask for pending resets
 * @tx_queue: TX DMA queues
 * @rx_queue: RX DMA queues
@@ -664,6 +662,8 @@ struct vfdi_status;
 *	should be allocated for this NIC
 * @rxq_entries: Size of receive queues requested by user.
 * @txq_entries: Size of transmit queues requested by user.
+ * @txq_stop_thresh: TX queue fill level at or above which we stop it.
+ * @txq_wake_thresh: TX queue fill level at or below which we wake it.
 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
 * @sram_lim_qw: Qword address limit of SRAM
@@ -774,6 +774,9 @@ struct efx_nic {
 	unsigned rxq_entries;
 	unsigned txq_entries;
+	unsigned int txq_stop_thresh;
+	unsigned int txq_wake_thresh;
+
 	unsigned tx_dc_base;
 	unsigned rx_dc_base;
 	unsigned sram_lim_qw;


@@ -298,7 +298,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
 /**************************************************************************
  *
  * Generic buffer handling
- * These buffers are used for interrupt status and MAC stats
+ * These buffers are used for interrupt status, MAC stats, etc.
  *
  **************************************************************************/
@@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
 		++tx_queue->write_count;
 
 		/* Create TX descriptor ring entry */
+		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
 		EFX_POPULATE_QWORD_4(*txd,
-				     FSF_AZ_TX_KER_CONT, buffer->continuation,
+				     FSF_AZ_TX_KER_CONT,
+				     buffer->flags & EFX_TX_BUF_CONT,
 				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
 				     FSF_AZ_TX_KER_BUF_REGION, 0,
 				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
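
The BUILD_BUG_ON pins down a small trick: with EFX_TX_BUF_CONT in bit 0, the masked expression is already the 0-or-1 value the FSF_AZ_TX_KER_CONT field expects, so the fast path needs no conditional or !! normalisation:

	/* e.g. flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE  (binary 1001)
	 *   (flags & EFX_TX_BUF_CONT) == 1   -> descriptor continues a packet
	 * If the flag ever moved to another bit, the expression would yield
	 * a value other than 0/1, and the BUILD_BUG_ON catches that at
	 * compile time.
	 */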

[Diff for the sixth file suppressed because it is too large]