Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next
Ben Hutchings says:

====================
1. Refactoring and cleanup in preparation for new hardware support.
2. Some bug fixes for firmware completion handling.  (They're not
known to cause real problems, otherwise I'd be submitting these for
net and stable.)
3. Update to the firmware protocol (MCDI) definitions.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit d853f11166
@@ -1,4 +1,5 @@
-sfc-y        += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
+sfc-y        += efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \
+                filter.o \
                 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
                 tenxpress.o txc43128_phy.o falcon_boards.o \
                 mcdi.o mcdi_port.o mcdi_mon.o ptp.o
@@ -191,8 +191,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
  *
  *************************************************************************/
 
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
+static void efx_soft_enable_interrupts(struct efx_nic *efx);
+static void efx_soft_disable_interrupts(struct efx_nic *efx);
 static void efx_remove_channel(struct efx_channel *channel);
 static void efx_remove_channels(struct efx_nic *efx);
 static const struct efx_channel_type efx_default_channel_type;
@@ -248,30 +248,12 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
             efx_channel_get_rx_queue(channel);
 
         efx_rx_flush_packet(channel);
-        if (rx_queue->enabled)
-            efx_fast_push_rx_descriptors(rx_queue);
+        efx_fast_push_rx_descriptors(rx_queue);
     }
 
     return spent;
 }
 
-/* Mark channel as finished processing
- *
- * Note that since we will not receive further interrupts for this
- * channel before we finish processing and call the eventq_read_ack()
- * method, there is no need to use the interrupt hold-off timers.
- */
-static inline void efx_channel_processed(struct efx_channel *channel)
-{
-    /* The interrupt handler for this channel may set work_pending
-     * as soon as we acknowledge the events we've seen.  Make sure
-     * it's cleared before then. */
-    channel->work_pending = false;
-    smp_wmb();
-
-    efx_nic_eventq_read_ack(channel);
-}
-
 /* NAPI poll handler
  *
  * NAPI guarantees serialisation of polls of the same device, which
@@ -316,58 +298,16 @@ static int efx_poll(struct napi_struct *napi, int budget)
 
         /* There is no race here; although napi_disable() will
          * only wait for napi_complete(), this isn't a problem
-         * since efx_channel_processed() will have no effect if
+         * since efx_nic_eventq_read_ack() will have no effect if
          * interrupts have already been disabled.
          */
         napi_complete(napi);
-        efx_channel_processed(channel);
+        efx_nic_eventq_read_ack(channel);
     }
 
     return spent;
 }
 
-/* Process the eventq of the specified channel immediately on this CPU
- *
- * Disable hardware generated interrupts, wait for any existing
- * processing to finish, then directly poll (and ack ) the eventq.
- * Finally reenable NAPI and interrupts.
- *
- * This is for use only during a loopback self-test.  It must not
- * deliver any packets up the stack as this can result in deadlock.
- */
-void efx_process_channel_now(struct efx_channel *channel)
-{
-    struct efx_nic *efx = channel->efx;
-
-    BUG_ON(channel->channel >= efx->n_channels);
-    BUG_ON(!channel->enabled);
-    BUG_ON(!efx->loopback_selftest);
-
-    /* Disable interrupts and wait for ISRs to complete */
-    efx_nic_disable_interrupts(efx);
-    if (efx->legacy_irq) {
-        synchronize_irq(efx->legacy_irq);
-        efx->legacy_irq_enabled = false;
-    }
-    if (channel->irq)
-        synchronize_irq(channel->irq);
-
-    /* Wait for any NAPI processing to complete */
-    napi_disable(&channel->napi_str);
-
-    /* Poll the channel */
-    efx_process_channel(channel, channel->eventq_mask + 1);
-
-    /* Ack the eventq.  This may cause an interrupt to be generated
-     * when they are reenabled */
-    efx_channel_processed(channel);
-
-    napi_enable(&channel->napi_str);
-    if (efx->legacy_irq)
-        efx->legacy_irq_enabled = true;
-    efx_nic_enable_interrupts(efx);
-}
-
 /* Create event queue
  * Event queue memory allocations are done only once. If the channel
  * is reset, the memory buffer will be reused; this guards against
@@ -407,11 +347,7 @@ static void efx_start_eventq(struct efx_channel *channel)
     netif_dbg(channel->efx, ifup, channel->efx->net_dev,
           "chan %d start event queue\n", channel->channel);
 
-    /* The interrupt handler for this channel may set work_pending
-     * as soon as we enable it.  Make sure it's cleared before
-     * then.  Similarly, make sure it sees the enabled flag set.
-     */
-    channel->work_pending = false;
+    /* Make sure the NAPI handler sees the enabled flag set */
     channel->enabled = true;
     smp_wmb();
@@ -583,8 +519,8 @@ static void efx_set_channel_names(struct efx_nic *efx)
 
     efx_for_each_channel(channel, efx)
         channel->type->get_name(channel,
-                    efx->channel_name[channel->channel],
-                    sizeof(efx->channel_name[0]));
+                    efx->msi_context[channel->channel].name,
+                    sizeof(efx->msi_context[0].name));
 }
 
 static int efx_probe_channels(struct efx_nic *efx)
@@ -704,30 +640,15 @@ static void efx_stop_datapath(struct efx_nic *efx)
     struct efx_channel *channel;
     struct efx_tx_queue *tx_queue;
     struct efx_rx_queue *rx_queue;
-    struct pci_dev *dev = efx->pci_dev;
     int rc;
 
     EFX_ASSERT_RESET_SERIALISED(efx);
     BUG_ON(efx->port_enabled);
 
-    /* Only perform flush if dma is enabled */
-    if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
-        rc = efx_nic_flush_queues(efx);
-
-        if (rc && EFX_WORKAROUND_7803(efx)) {
-            /* Schedule a reset to recover from the flush failure. The
-             * descriptor caches reference memory we're about to free,
-             * but falcon_reconfigure_mac_wrapper() won't reconnect
-             * the MACs because of the pending reset. */
-            netif_err(efx, drv, efx->net_dev,
-                  "Resetting to recover from flush failure\n");
-            efx_schedule_reset(efx, RESET_TYPE_ALL);
-        } else if (rc) {
-            netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
-        } else {
-            netif_dbg(efx, drv, efx->net_dev,
-                  "successfully flushed all queues\n");
-        }
+    /* Stop RX refill */
+    efx_for_each_channel(channel, efx) {
+        efx_for_each_channel_rx_queue(rx_queue, channel)
+            rx_queue->refill_enabled = false;
     }
 
     efx_for_each_channel(channel, efx) {
@@ -741,7 +662,26 @@ static void efx_stop_datapath(struct efx_nic *efx)
             efx_stop_eventq(channel);
             efx_start_eventq(channel);
         }
     }
 
+    rc = efx->type->fini_dmaq(efx);
+    if (rc && EFX_WORKAROUND_7803(efx)) {
+        /* Schedule a reset to recover from the flush failure. The
+         * descriptor caches reference memory we're about to free,
+         * but falcon_reconfigure_mac_wrapper() won't reconnect
+         * the MACs because of the pending reset.
+         */
+        netif_err(efx, drv, efx->net_dev,
+              "Resetting to recover from flush failure\n");
+        efx_schedule_reset(efx, RESET_TYPE_ALL);
+    } else if (rc) {
+        netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+    } else {
+        netif_dbg(efx, drv, efx->net_dev,
+              "successfully flushed all queues\n");
+    }
+
     efx_for_each_channel(channel, efx) {
         efx_for_each_channel_rx_queue(rx_queue, channel)
             efx_fini_rx_queue(rx_queue);
         efx_for_each_possible_channel_tx_queue(tx_queue, channel)
@@ -809,7 +749,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 
     efx_device_detach_sync(efx);
     efx_stop_all(efx);
-    efx_stop_interrupts(efx, true);
+    efx_soft_disable_interrupts(efx);
 
     /* Clone channels (where possible) */
     memset(other_channel, 0, sizeof(other_channel));
@@ -859,7 +799,7 @@ out:
         }
     }
 
-    efx_start_interrupts(efx, true);
+    efx_soft_enable_interrupts(efx);
     efx_start_all(efx);
     netif_device_attach(efx->net_dev);
     return rc;
@@ -1392,8 +1332,50 @@ static int efx_probe_interrupts(struct efx_nic *efx)
     return 0;
 }
 
-/* Enable interrupts, then probe and start the event queues */
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+static void efx_soft_enable_interrupts(struct efx_nic *efx)
+{
+    struct efx_channel *channel;
+
+    BUG_ON(efx->state == STATE_DISABLED);
+
+    efx->irq_soft_enabled = true;
+    smp_wmb();
+
+    efx_for_each_channel(channel, efx) {
+        if (!channel->type->keep_eventq)
+            efx_init_eventq(channel);
+        efx_start_eventq(channel);
+    }
+
+    efx_mcdi_mode_event(efx);
+}
+
+static void efx_soft_disable_interrupts(struct efx_nic *efx)
+{
+    struct efx_channel *channel;
+
+    if (efx->state == STATE_DISABLED)
+        return;
+
+    efx_mcdi_mode_poll(efx);
+
+    efx->irq_soft_enabled = false;
+    smp_wmb();
+
+    if (efx->legacy_irq)
+        synchronize_irq(efx->legacy_irq);
+
+    efx_for_each_channel(channel, efx) {
+        if (channel->irq)
+            synchronize_irq(channel->irq);
+
+        efx_stop_eventq(channel);
+        if (!channel->type->keep_eventq)
+            efx_fini_eventq(channel);
+    }
+}
+
+static void efx_enable_interrupts(struct efx_nic *efx)
 {
     struct efx_channel *channel;
 
@@ -1403,42 +1385,29 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
         enable_irq(efx->legacy_irq);
         efx->eeh_disabled_legacy_irq = false;
     }
-    if (efx->legacy_irq)
-        efx->legacy_irq_enabled = true;
-    efx_nic_enable_interrupts(efx);
+
+    efx->type->irq_enable_master(efx);
 
     efx_for_each_channel(channel, efx) {
-        if (!channel->type->keep_eventq || !may_keep_eventq)
+        if (channel->type->keep_eventq)
             efx_init_eventq(channel);
-        efx_start_eventq(channel);
     }
 
-    efx_mcdi_mode_event(efx);
+    efx_soft_enable_interrupts(efx);
 }
 
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+static void efx_disable_interrupts(struct efx_nic *efx)
 {
     struct efx_channel *channel;
 
-    if (efx->state == STATE_DISABLED)
-        return;
-
-    efx_mcdi_mode_poll(efx);
-
-    efx_nic_disable_interrupts(efx);
-    if (efx->legacy_irq) {
-        synchronize_irq(efx->legacy_irq);
-        efx->legacy_irq_enabled = false;
-    }
+    efx_soft_disable_interrupts(efx);
 
     efx_for_each_channel(channel, efx) {
-        if (channel->irq)
-            synchronize_irq(channel->irq);
-
-        efx_stop_eventq(channel);
-        if (!channel->type->keep_eventq || !may_keep_eventq)
+        if (channel->type->keep_eventq)
             efx_fini_eventq(channel);
     }
+
+    efx->type->irq_disable_non_ev(efx);
 }
 
 static void efx_remove_interrupts(struct efx_nic *efx)
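Note (an annotation, not part of the commit): the hunks above and below replace the old may_keep_eventq flag with a layered enable/disable API. A sketch of how the old calls map onto the new ones, inferred from the call-site hunks in this commit rather than stated anywhere in it:

    /* efx_start_interrupts(efx, false)  ->  efx_enable_interrupts(efx)
     * efx_start_interrupts(efx, true)   ->  efx_soft_enable_interrupts(efx)
     * efx_stop_interrupts(efx, false)   ->  efx_disable_interrupts(efx)
     * efx_stop_interrupts(efx, true)    ->  efx_soft_disable_interrupts(efx)
     *
     * The "soft" layer starts/stops per-channel event processing and MCDI
     * event mode; the outer layer additionally owns the hardware master
     * enable and the lifetime of keep_eventq event queues.
     */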
@@ -2185,22 +2154,11 @@ fail_locked:
 
 static void efx_unregister_netdev(struct efx_nic *efx)
 {
-    struct efx_channel *channel;
-    struct efx_tx_queue *tx_queue;
-
     if (!efx->net_dev)
         return;
 
     BUG_ON(netdev_priv(efx->net_dev) != efx);
 
-    /* Free up any skbs still remaining. This has to happen before
-     * we try to unregister the netdev as running their destructors
-     * may be needed to get the device ref. count to 0. */
-    efx_for_each_channel(channel, efx) {
-        efx_for_each_channel_tx_queue(tx_queue, channel)
-            efx_release_tx_buffers(tx_queue);
-    }
-
     strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
     device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2223,7 +2181,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
     EFX_ASSERT_RESET_SERIALISED(efx);
 
     efx_stop_all(efx);
-    efx_stop_interrupts(efx, false);
+    efx_disable_interrupts(efx);
 
     mutex_lock(&efx->mac_lock);
     if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2262,7 +2220,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
 
     efx->type->reconfigure_mac(efx);
 
-    efx_start_interrupts(efx, false);
+    efx_enable_interrupts(efx);
     efx_restore_filters(efx);
     efx_sriov_reset(efx);
@@ -2527,6 +2485,8 @@ static int efx_init_struct(struct efx_nic *efx,
         efx->channel[i] = efx_alloc_channel(efx, i, NULL);
         if (!efx->channel[i])
             goto fail;
+        efx->msi_context[i].efx = efx;
+        efx->msi_context[i].index = i;
     }
 
     EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
@@ -2579,7 +2539,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
     BUG_ON(efx->state == STATE_READY);
     cancel_work_sync(&efx->reset_work);
 
-    efx_stop_interrupts(efx, false);
+    efx_disable_interrupts(efx);
     efx_nic_fini_interrupt(efx);
     efx_fini_port(efx);
     efx->type->fini(efx);
@@ -2601,7 +2561,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
     /* Mark the NIC as fini, then stop the interface */
     rtnl_lock();
     dev_close(efx->net_dev);
-    efx_stop_interrupts(efx, false);
+    efx_disable_interrupts(efx);
     rtnl_unlock();
 
     efx_sriov_fini(efx);
@@ -2703,7 +2663,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
     rc = efx_nic_init_interrupt(efx);
     if (rc)
         goto fail5;
-    efx_start_interrupts(efx, false);
+    efx_enable_interrupts(efx);
 
     return 0;
@@ -2824,7 +2784,7 @@ static int efx_pm_freeze(struct device *dev)
         efx_device_detach_sync(efx);
 
         efx_stop_all(efx);
-        efx_stop_interrupts(efx, false);
+        efx_disable_interrupts(efx);
     }
 
     rtnl_unlock();
@@ -2839,7 +2799,7 @@ static int efx_pm_thaw(struct device *dev)
     rtnl_lock();
 
     if (efx->state != STATE_DISABLED) {
-        efx_start_interrupts(efx, false);
+        efx_enable_interrupts(efx);
 
         mutex_lock(&efx->mac_lock);
         efx->phy_op->reconfigure(efx);
@@ -2942,7 +2902,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
         efx_device_detach_sync(efx);
 
         efx_stop_all(efx);
-        efx_stop_interrupts(efx, false);
+        efx_disable_interrupts(efx);
 
         status = PCI_ERS_RESULT_NEED_RESET;
     } else {
@@ -23,7 +23,6 @@ extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
 extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
 extern netdev_tx_t
 efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
@@ -109,7 +108,6 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
 /* Channels */
 extern int efx_channel_dummy_op_int(struct efx_channel *channel);
 extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern void efx_process_channel_now(struct efx_channel *channel);
 extern int
 efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
@@ -155,7 +153,6 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
     netif_vdbg(channel->efx, intr, channel->efx->net_dev,
            "channel %d scheduling NAPI poll on CPU%d\n",
            channel->channel, raw_smp_processor_id());
-    channel->work_pending = true;
 
     napi_schedule(&channel->napi_str);
 }
@@ -21,7 +21,7 @@
 #include "efx.h"
 #include "spi.h"
 #include "nic.h"
-#include "regs.h"
+#include "farch_regs.h"
 #include "io.h"
 #include "phy.h"
 #include "workarounds.h"
@@ -336,7 +336,7 @@ static void falcon_prepare_flush(struct efx_nic *efx)
  *
  * NB most hardware supports MSI interrupts
  */
-inline void falcon_irq_ack_a1(struct efx_nic *efx)
+static inline void falcon_irq_ack_a1(struct efx_nic *efx)
 {
     efx_dword_t reg;
@@ -346,7 +346,7 @@ inline void falcon_irq_ack_a1(struct efx_nic *efx)
 }
 
 
-irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
+static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 {
     struct efx_nic *efx = dev_id;
     efx_oword_t *int_ker = efx->irq_status.addr;
@@ -367,10 +367,13 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
           "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
           irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
 
+    if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+        return IRQ_HANDLED;
+
     /* Check to see if we have a serious error condition */
     syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
     if (unlikely(syserr))
-        return efx_nic_fatal_interrupt(efx);
+        return efx_farch_fatal_interrupt(efx);
 
     /* Determine interrupting queues, clear interrupt status
      * register and acknowledge the device interrupt.
@@ -1418,7 +1421,7 @@ static int falcon_probe_port(struct efx_nic *efx)
 
     /* Allocate buffer for stats */
     rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
-                  FALCON_MAC_STATS_SIZE);
+                  FALCON_MAC_STATS_SIZE, GFP_KERNEL);
     if (rc)
         return rc;
     netif_dbg(efx, probe, efx->net_dev,
@@ -1555,7 +1558,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
     return falcon_read_nvram(efx, NULL);
 }
 
-static const struct efx_nic_register_test falcon_b0_register_tests[] = {
+static const struct efx_farch_register_test falcon_b0_register_tests[] = {
     { FR_AZ_ADR_REGION,
       EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
     { FR_AZ_RX_CFG,
@@ -1615,8 +1618,8 @@ falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
     efx_reset_down(efx, reset_method);
 
     tests->registers =
-        efx_nic_test_registers(efx, falcon_b0_register_tests,
-                       ARRAY_SIZE(falcon_b0_register_tests))
+        efx_farch_test_registers(efx, falcon_b0_register_tests,
+                     ARRAY_SIZE(falcon_b0_register_tests))
         ? -1 : 1;
 
     rc = falcon_reset_hw(efx, reset_method);
@@ -1981,7 +1984,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
 
     rc = -ENODEV;
 
-    if (efx_nic_fpga_ver(efx) != 0) {
+    if (efx_farch_fpga_ver(efx) != 0) {
         netif_err(efx, probe, efx->net_dev,
               "Falcon FPGA not supported\n");
         goto fail1;
@@ -2035,7 +2038,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
     }
 
     /* Allocate memory for INT_KER */
-    rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+    rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+                  GFP_KERNEL);
     if (rc)
         goto fail4;
     BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -2214,7 +2218,7 @@ static int falcon_init_nic(struct efx_nic *efx)
         efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
     }
 
-    efx_nic_init_common(efx);
+    efx_farch_init_common(efx);
 
     return 0;
 }
@@ -2339,7 +2343,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
     .remove = falcon_remove_nic,
     .init = falcon_init_nic,
     .dimension_resources = falcon_dimension_resources,
-    .fini = efx_port_dummy_op_void,
+    .fini = falcon_irq_ack_a1,
     .monitor = falcon_monitor,
     .map_reset_reason = falcon_map_reset_reason,
     .map_reset_flags = falcon_map_reset_flags,
@@ -2347,6 +2351,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
     .probe_port = falcon_probe_port,
     .remove_port = falcon_remove_port,
     .handle_global_event = falcon_handle_global_event,
+    .fini_dmaq = efx_farch_fini_dmaq,
     .prepare_flush = falcon_prepare_flush,
     .finish_flush = efx_port_dummy_op_void,
     .update_stats = falcon_update_nic_stats,
@@ -2362,6 +2367,28 @@ const struct efx_nic_type falcon_a1_nic_type = {
     .set_wol = falcon_set_wol,
     .resume_wol = efx_port_dummy_op_void,
     .test_nvram = falcon_test_nvram,
+    .irq_enable_master = efx_farch_irq_enable_master,
+    .irq_test_generate = efx_farch_irq_test_generate,
+    .irq_disable_non_ev = efx_farch_irq_disable_master,
+    .irq_handle_msi = efx_farch_msi_interrupt,
+    .irq_handle_legacy = falcon_legacy_interrupt_a1,
+    .tx_probe = efx_farch_tx_probe,
+    .tx_init = efx_farch_tx_init,
+    .tx_remove = efx_farch_tx_remove,
+    .tx_write = efx_farch_tx_write,
+    .rx_push_indir_table = efx_farch_rx_push_indir_table,
+    .rx_probe = efx_farch_rx_probe,
+    .rx_init = efx_farch_rx_init,
+    .rx_remove = efx_farch_rx_remove,
+    .rx_write = efx_farch_rx_write,
+    .rx_defer_refill = efx_farch_rx_defer_refill,
+    .ev_probe = efx_farch_ev_probe,
+    .ev_init = efx_farch_ev_init,
+    .ev_fini = efx_farch_ev_fini,
+    .ev_remove = efx_farch_ev_remove,
+    .ev_process = efx_farch_ev_process,
+    .ev_read_ack = efx_farch_ev_read_ack,
+    .ev_test_generate = efx_farch_ev_test_generate,
 
     .revision = EFX_REV_FALCON_A1,
     .mem_map_size = 0x20000,
@@ -2377,6 +2404,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
     .phys_addr_channels = 4,
     .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
     .offload_features = NETIF_F_IP_CSUM,
+    .mcdi_max_ver = -1,
 };
 
 const struct efx_nic_type falcon_b0_nic_type = {
@@ -2392,6 +2420,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
     .probe_port = falcon_probe_port,
     .remove_port = falcon_remove_port,
     .handle_global_event = falcon_handle_global_event,
+    .fini_dmaq = efx_farch_fini_dmaq,
     .prepare_flush = falcon_prepare_flush,
     .finish_flush = efx_port_dummy_op_void,
     .update_stats = falcon_update_nic_stats,
@@ -2408,6 +2437,28 @@ const struct efx_nic_type falcon_b0_nic_type = {
     .resume_wol = efx_port_dummy_op_void,
     .test_chip = falcon_b0_test_chip,
     .test_nvram = falcon_test_nvram,
+    .irq_enable_master = efx_farch_irq_enable_master,
+    .irq_test_generate = efx_farch_irq_test_generate,
+    .irq_disable_non_ev = efx_farch_irq_disable_master,
+    .irq_handle_msi = efx_farch_msi_interrupt,
+    .irq_handle_legacy = efx_farch_legacy_interrupt,
+    .tx_probe = efx_farch_tx_probe,
+    .tx_init = efx_farch_tx_init,
+    .tx_remove = efx_farch_tx_remove,
+    .tx_write = efx_farch_tx_write,
+    .rx_push_indir_table = efx_farch_rx_push_indir_table,
+    .rx_probe = efx_farch_rx_probe,
+    .rx_init = efx_farch_rx_init,
+    .rx_remove = efx_farch_rx_remove,
+    .rx_write = efx_farch_rx_write,
+    .rx_defer_refill = efx_farch_rx_defer_refill,
+    .ev_probe = efx_farch_ev_probe,
+    .ev_init = efx_farch_ev_init,
+    .ev_fini = efx_farch_ev_fini,
+    .ev_remove = efx_farch_ev_remove,
+    .ev_process = efx_farch_ev_process,
+    .ev_read_ack = efx_farch_ev_read_ack,
+    .ev_test_generate = efx_farch_ev_test_generate,
 
     .revision = EFX_REV_FALCON_B0,
     /* Map everything up to and including the RSS indirection
@@ -2431,5 +2482,6 @@ const struct efx_nic_type falcon_b0_nic_type = {
      * channels */
     .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
     .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
+    .mcdi_max_ver = -1,
 };
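Note (an annotation, not part of the commit): the .irq_*, .tx_*, .rx_* and .ev_* members added to falcon_a1_nic_type and falcon_b0_nic_type let the core driver dispatch Falcon-architecture operations through struct efx_nic_type instead of calling efx_nic_* helpers directly. A hypothetical call site (a sketch only; the real callers are in files whose diffs are not shown here, and the assumption is that ev_probe takes the channel):

    /* dispatch via the per-NIC-type method table */
    rc = channel->efx->type->ev_probe(channel);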
[File diff suppressed because it is too large]
@@ -8,8 +8,8 @@
  * by the Free Software Foundation, incorporated herein by reference.
  */
 
-#ifndef EFX_REGS_H
-#define EFX_REGS_H
+#ifndef EFX_FARCH_REGS_H
+#define EFX_FARCH_REGS_H
 
 /*
  * Falcon hardware architecture definitions have a name prefix following
@@ -2925,4 +2925,4 @@
 #define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
 #define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
 
-#endif /* EFX_REGS_H */
+#endif /* EFX_FARCH_REGS_H */
@@ -13,7 +13,7 @@
 #include "filter.h"
 #include "io.h"
 #include "nic.h"
-#include "regs.h"
+#include "farch_regs.h"
 
 /* "Fudge factors" - difference between programmed value and actual depth.
  * Due to pipelined implementation we need to program H/W with a value that
@@ -11,7 +11,7 @@
 #include "net_driver.h"
 #include "nic.h"
 #include "io.h"
-#include "regs.h"
+#include "farch_regs.h"
 #include "mcdi_pcol.h"
 #include "phy.h"
@@ -24,13 +24,6 @@
 
 #define MCDI_RPC_TIMEOUT       (10 * HZ)
 
-#define MCDI_PDU(efx)							\
-    (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
-#define MCDI_DOORBELL(efx)						\
-    (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
-#define MCDI_STATUS(efx)						\
-    (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
-
 /* A reboot/assertion causes the MCDI status word to be set after the
  * command word is set or a REBOOT event is sent. If we notice a reboot
  * via these mechanisms then wait 10ms for the status word to be set. */
@@ -44,16 +37,18 @@
 
 static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
 {
-    struct siena_nic_data *nic_data;
-    EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
-    nic_data = efx->nic_data;
-    return &nic_data->mcdi;
+    EFX_BUG_ON_PARANOID(!efx->mcdi);
+    return &efx->mcdi->iface;
 }
 
 int efx_mcdi_init(struct efx_nic *efx)
 {
     struct efx_mcdi_iface *mcdi;
 
+    efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
+    if (!efx->mcdi)
+        return -ENOMEM;
+
     mcdi = efx_mcdi(efx);
     init_waitqueue_head(&mcdi->wq);
     spin_lock_init(&mcdi->iface_lock);
@@ -66,73 +61,142 @@ int efx_mcdi_init(struct efx_nic *efx)
     return efx_mcdi_handle_assertion(efx);
 }
 
+void efx_mcdi_fini(struct efx_nic *efx)
+{
+    BUG_ON(efx->mcdi &&
+           atomic_read(&efx->mcdi->iface.state) != MCDI_STATE_QUIESCENT);
+    kfree(efx->mcdi);
+}
+
 static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
                 const efx_dword_t *inbuf, size_t inlen)
 {
     struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-    unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
-    unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
-    unsigned int i;
-    efx_dword_t hdr;
+    efx_dword_t hdr[2];
+    size_t hdr_len;
     u32 xflags, seqno;
-    unsigned int inlen_dw = DIV_ROUND_UP(inlen, 4);
 
     BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
-    BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V1);
 
     seqno = mcdi->seqno & SEQ_MASK;
     xflags = 0;
     if (mcdi->mode == MCDI_MODE_EVENTS)
         xflags |= MCDI_HEADER_XFLAGS_EVREQ;
 
-    EFX_POPULATE_DWORD_6(hdr,
-                 MCDI_HEADER_RESPONSE, 0,
-                 MCDI_HEADER_RESYNC, 1,
-                 MCDI_HEADER_CODE, cmd,
-                 MCDI_HEADER_DATALEN, inlen,
-                 MCDI_HEADER_SEQ, seqno,
-                 MCDI_HEADER_XFLAGS, xflags);
+    if (efx->type->mcdi_max_ver == 1) {
+        /* MCDI v1 */
+        EFX_POPULATE_DWORD_6(hdr[0],
+                     MCDI_HEADER_RESPONSE, 0,
+                     MCDI_HEADER_RESYNC, 1,
+                     MCDI_HEADER_CODE, cmd,
+                     MCDI_HEADER_DATALEN, inlen,
+                     MCDI_HEADER_SEQ, seqno,
+                     MCDI_HEADER_XFLAGS, xflags);
+        hdr_len = 4;
+    } else {
+        /* MCDI v2 */
+        BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
+        EFX_POPULATE_DWORD_6(hdr[0],
+                     MCDI_HEADER_RESPONSE, 0,
+                     MCDI_HEADER_RESYNC, 1,
+                     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+                     MCDI_HEADER_DATALEN, 0,
+                     MCDI_HEADER_SEQ, seqno,
+                     MCDI_HEADER_XFLAGS, xflags);
+        EFX_POPULATE_DWORD_2(hdr[1],
+                     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
+                     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
+        hdr_len = 8;
+    }
 
-    efx_writed(efx, &hdr, pdu);
-
-    for (i = 0; i < inlen_dw; i++)
-        efx_writed(efx, &inbuf[i], pdu + 4 + 4 * i);
-
-    /* Ensure the payload is written out before the header */
-    wmb();
-
-    /* ring the doorbell with a distinctive value */
-    _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+    efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
 }
 
-static void
-efx_mcdi_copyout(struct efx_nic *efx, efx_dword_t *outbuf, size_t outlen)
+static int efx_mcdi_errno(unsigned int mcdi_err)
 {
-    struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-    unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
-    unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4);
-    int i;
-
-    BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
-    BUG_ON(outlen > MCDI_CTL_SDU_LEN_MAX_V1);
-
-    for (i = 0; i < outlen_dw; i++)
-        efx_readd(efx, &outbuf[i], pdu + 4 + 4 * i);
+    switch (mcdi_err) {
+    case 0:
+        return 0;
+#define TRANSLATE_ERROR(name)					\
+    case MC_CMD_ERR_ ## name:					\
+        return -name;
+    TRANSLATE_ERROR(EPERM);
+    TRANSLATE_ERROR(ENOENT);
+    TRANSLATE_ERROR(EINTR);
+    TRANSLATE_ERROR(EAGAIN);
+    TRANSLATE_ERROR(EACCES);
+    TRANSLATE_ERROR(EBUSY);
+    TRANSLATE_ERROR(EINVAL);
+    TRANSLATE_ERROR(EDEADLK);
+    TRANSLATE_ERROR(ENOSYS);
+    TRANSLATE_ERROR(ETIME);
+    TRANSLATE_ERROR(EALREADY);
+    TRANSLATE_ERROR(ENOSPC);
+#undef TRANSLATE_ERROR
+    case MC_CMD_ERR_ALLOC_FAIL:
+        return -ENOBUFS;
+    case MC_CMD_ERR_MAC_EXIST:
+        return -EADDRINUSE;
+    default:
+        return -EPROTO;
+    }
+}
+
+static void efx_mcdi_read_response_header(struct efx_nic *efx)
+{
+    struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+    unsigned int respseq, respcmd, error;
+    efx_dword_t hdr;
+
+    efx->type->mcdi_read_response(efx, &hdr, 0, 4);
+    respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
+    respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
+    error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
+
+    if (respcmd != MC_CMD_V2_EXTN) {
+        mcdi->resp_hdr_len = 4;
+        mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
+    } else {
+        efx->type->mcdi_read_response(efx, &hdr, 4, 4);
+        mcdi->resp_hdr_len = 8;
+        mcdi->resp_data_len =
+            EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+    }
+
+    if (error && mcdi->resp_data_len == 0) {
+        netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
+        mcdi->resprc = -EIO;
+    } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
+        netif_err(efx, hw, efx->net_dev,
+              "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+              respseq, mcdi->seqno);
+        mcdi->resprc = -EIO;
+    } else if (error) {
+        efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
+        mcdi->resprc =
+            efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
+    } else {
+        mcdi->resprc = 0;
+    }
 }
 
 static int efx_mcdi_poll(struct efx_nic *efx)
 {
     struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
     unsigned long time, finish;
-    unsigned int respseq, respcmd, error;
-    unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
-    unsigned int rc, spins;
-    efx_dword_t reg;
+    unsigned int spins;
+    int rc;
 
     /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
-    rc = -efx_mcdi_poll_reboot(efx);
-    if (rc)
-        goto out;
+    rc = efx_mcdi_poll_reboot(efx);
+    if (rc) {
+        spin_lock_bh(&mcdi->iface_lock);
+        mcdi->resprc = rc;
+        mcdi->resp_hdr_len = 0;
+        mcdi->resp_data_len = 0;
+        spin_unlock_bh(&mcdi->iface_lock);
+        return 0;
+    }
 
     /* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
      * because generally mcdi responses are fast. After that, back off
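Note (an annotation, not part of the commit): the rewritten efx_mcdi_copyin() above frames requests in one of two layouts. A summary sketch, assuming the field meanings follow the MCDI_HEADER_* and MC_CMD_V2_EXTN_* definitions in mcdi_pcol.h:

    /* MCDI v1 - one header dword:
     *   hdr[0] = { RESPONSE=0, RESYNC=1, CODE=cmd, DATALEN=inlen,
     *              SEQ=seqno, XFLAGS=xflags }
     *
     * MCDI v2 - two header dwords; the real command and length move to
     * hdr[1], so they are no longer limited by the narrow v1 fields:
     *   hdr[0] = { CODE=MC_CMD_V2_EXTN, DATALEN=0, SEQ=seqno, ... }
     *   hdr[1] = { EXTENDED_CMD=cmd, ACTUAL_LEN=inlen }
     */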
@@ -152,59 +216,16 @@ static int efx_mcdi_poll(struct efx_nic *efx)
         time = jiffies;
 
         rmb();
-        efx_readd(efx, &reg, pdu);
-
-        /* All 1's indicates that shared memory is in reset (and is
-         * not a valid header). Wait for it to come out reset before
-         * completing the command */
-        if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
-            EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
+        if (efx->type->mcdi_poll_response(efx))
             break;
 
         if (time_after(time, finish))
             return -ETIMEDOUT;
     }
 
-    mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
-    respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
-    respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
-    error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
-
-    if (error && mcdi->resplen == 0) {
-        netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
-        rc = EIO;
-    } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
-        netif_err(efx, hw, efx->net_dev,
-              "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
-              respseq, mcdi->seqno);
-        rc = EIO;
-    } else if (error) {
-        efx_readd(efx, &reg, pdu + 4);
-        switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
-#define TRANSLATE_ERROR(name)					\
-        case MC_CMD_ERR_ ## name:			\
-            rc = name;				\
-            break
-        TRANSLATE_ERROR(ENOENT);
-        TRANSLATE_ERROR(EINTR);
-        TRANSLATE_ERROR(EACCES);
-        TRANSLATE_ERROR(EBUSY);
-        TRANSLATE_ERROR(EINVAL);
-        TRANSLATE_ERROR(EDEADLK);
-        TRANSLATE_ERROR(ENOSYS);
-        TRANSLATE_ERROR(ETIME);
-#undef TRANSLATE_ERROR
-        default:
-            rc = EIO;
-            break;
-        }
-    } else
-        rc = 0;
-
-out:
-    mcdi->resprc = rc;
-    if (rc)
-        mcdi->resplen = 0;
+    spin_lock_bh(&mcdi->iface_lock);
+    efx_mcdi_read_response_header(efx);
+    spin_unlock_bh(&mcdi->iface_lock);
 
     /* Return rc=0 like wait_event_timeout() */
     return 0;
@@ -215,17 +236,13 @@ out:
  */
 int efx_mcdi_poll_reboot(struct efx_nic *efx)
 {
-    unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
-    efx_dword_t reg;
-    uint32_t value;
+    int rc;
 
-    if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
-        return false;
+    if (!efx->mcdi)
+        return 0;
 
-    efx_readd(efx, &reg, addr);
-    value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
-
-    if (value == 0)
+    rc = efx->type->mcdi_poll_reboot(efx);
+    if (!rc)
         return 0;
 
     /* MAC statistics have been cleared on the NIC; clear our copy
@@ -233,13 +250,7 @@ int efx_mcdi_poll_reboot(struct efx_nic *efx)
      */
     memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
 
-    EFX_ZERO_DWORD(reg);
-    efx_writed(efx, &reg, addr);
-
-    if (value == MC_STATUS_DWORD_ASSERT)
-        return -EINTR;
-    else
-        return -EIO;
+    return rc;
 }
 
 static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
@@ -302,7 +313,7 @@ static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
 }
 
 static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
-                unsigned int datalen, unsigned int errno)
+                unsigned int datalen, unsigned int mcdi_err)
 {
     struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
     bool wake = false;
@@ -318,8 +329,14 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
               "MC response mismatch tx seq 0x%x rx "
               "seq 0x%x\n", seqno, mcdi->seqno);
     } else {
-        mcdi->resprc = errno;
-        mcdi->resplen = datalen;
+        if (efx->type->mcdi_max_ver >= 2) {
+            /* MCDI v2 responses don't fit in an event */
+            efx_mcdi_read_response_header(efx);
+        } else {
+            mcdi->resprc = efx_mcdi_errno(mcdi_err);
+            mcdi->resp_hdr_len = 4;
+            mcdi->resp_data_len = datalen;
+        }
 
         wake = true;
     }
@@ -335,17 +352,29 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
          efx_dword_t *outbuf, size_t outlen,
          size_t *outlen_actual)
 {
-    efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+    int rc;
+
+    rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+    if (rc)
+        return rc;
     return efx_mcdi_rpc_finish(efx, cmd, inlen,
                    outbuf, outlen, outlen_actual);
 }
 
-void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
-            const efx_dword_t *inbuf, size_t inlen)
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+               const efx_dword_t *inbuf, size_t inlen)
 {
     struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 
-    BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
+    if (efx->type->mcdi_max_ver < 0 ||
+        (efx->type->mcdi_max_ver < 2 &&
+         cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
+        return -EINVAL;
+
+    if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
+        (efx->type->mcdi_max_ver < 2 &&
+         inlen > MCDI_CTL_SDU_LEN_MAX_V1))
+        return -EMSGSIZE;
 
     efx_mcdi_acquire(mcdi);
@@ -355,6 +384,7 @@ void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
     spin_unlock_bh(&mcdi->iface_lock);
 
     efx_mcdi_copyin(efx, cmd, inbuf, inlen);
+    return 0;
 }
 
 int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
@@ -364,8 +394,6 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
     struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
     int rc;
 
-    BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
-
     if (mcdi->mode == MCDI_MODE_POLL)
         rc = efx_mcdi_poll(efx);
     else
@@ -385,22 +413,25 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
               "MC command 0x%x inlen %d mode %d timed out\n",
               cmd, (int)inlen, mcdi->mode);
     } else {
-        size_t resplen;
+        size_t hdr_len, data_len;
 
         /* At the very least we need a memory barrier here to ensure
          * we pick up changes from efx_mcdi_ev_cpl(). Protect against
          * a spurious efx_mcdi_ev_cpl() running concurrently by
          * acquiring the iface_lock. */
         spin_lock_bh(&mcdi->iface_lock);
-        rc = -mcdi->resprc;
-        resplen = mcdi->resplen;
+        rc = mcdi->resprc;
+        hdr_len = mcdi->resp_hdr_len;
+        data_len = mcdi->resp_data_len;
         spin_unlock_bh(&mcdi->iface_lock);
 
+        BUG_ON(rc > 0);
+
         if (rc == 0) {
-            efx_mcdi_copyout(efx, outbuf,
-                     min(outlen, mcdi->resplen));
+            efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+                              min(outlen, data_len));
             if (outlen_actual != NULL)
-                *outlen_actual = resplen;
+                *outlen_actual = data_len;
         } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
             ; /* Don't reset if MC_CMD_REBOOT returns EIO */
         else if (rc == -EIO || rc == -EINTR) {
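Note (an annotation, not part of the commit): one of the completion-handling fixes mentioned in the cover letter is visible in the hunk above: mcdi->resprc now holds a negative Linux errno, translated once by efx_mcdi_errno() at completion time, so consumers read it without negating.

    /* before: rc = -mcdi->resprc;   (resprc was a positive MCDI code)
     * after:  rc = mcdi->resprc;    (resprc is already <= 0)
     */

The event-death paths below change sign to match (-EINTR/-EIO instead of EINTR/EIO).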
@@ -426,7 +457,7 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
 {
     struct efx_mcdi_iface *mcdi;
 
-    if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+    if (!efx->mcdi)
         return;
 
     mcdi = efx_mcdi(efx);
@@ -450,7 +481,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
 {
     struct efx_mcdi_iface *mcdi;
 
-    if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+    if (!efx->mcdi)
         return;
 
     mcdi = efx_mcdi(efx);
@@ -494,7 +525,8 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
     if (efx_mcdi_complete(mcdi)) {
         if (mcdi->mode == MCDI_MODE_EVENTS) {
             mcdi->resprc = rc;
-            mcdi->resplen = 0;
+            mcdi->resp_hdr_len = 0;
+            mcdi->resp_data_len = 0;
             ++mcdi->credits;
         }
     } else {
@@ -526,7 +558,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
     case MCDI_EVENT_CODE_BADSSERT:
         netif_err(efx, hw, efx->net_dev,
               "MC watchdog or assertion failure at 0x%x\n", data);
-        efx_mcdi_ev_death(efx, EINTR);
+        efx_mcdi_ev_death(efx, -EINTR);
         break;
 
     case MCDI_EVENT_CODE_PMNOTICE:
@@ -552,7 +584,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
         break;
     case MCDI_EVENT_CODE_REBOOT:
         netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
-        efx_mcdi_ev_death(efx, EIO);
+        efx_mcdi_ev_death(efx, -EIO);
         break;
     case MCDI_EVENT_CODE_MAC_STATS_DMA:
         /* MAC stats are gather lazily.  We can ignore this. */
@@ -620,6 +652,7 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
     MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
                driver_operating ? 1 : 0);
     MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
+    MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
 
     rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
               outbuf, sizeof(outbuf), &outlen);
@@ -11,14 +11,14 @@
 #define EFX_MCDI_H
 
 /**
- * enum efx_mcdi_state
+ * enum efx_mcdi_state - MCDI request handling state
  * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
- *	mcdi_lock then they are able to move to MCDI_STATE_RUNNING
+ *	mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING
  * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that
  *	moved into this state is allowed to move out of it.
  * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
  *	has not yet consumed the result. For all other threads, equivalent to
- *	MCDI_STATE_RUNNING.
+ *	%MCDI_STATE_RUNNING.
  */
 enum efx_mcdi_state {
     MCDI_STATE_QUIESCENT,
@@ -32,28 +32,28 @@ enum efx_mcdi_mode {
 };
 
 /**
- * struct efx_mcdi_iface
- * @state: Interface state. Waited for by mcdi_wq.
- * @wq: Wait queue for threads waiting for state != STATE_RUNNING
- * @iface_lock: Protects @credits, @seqno, @resprc, @resplen
+ * struct efx_mcdi_iface - MCDI protocol context
+ * @state: Request handling state. Waited for by @wq.
  * @mode: Poll for mcdi completion, or wait for an mcdi_event.
- *	Serialised by @lock
+ * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
+ * @iface_lock: Serialises access to all the following fields
  * @seqno: The next sequence number to use for mcdi requests.
- *	Serialised by @lock
 * @credits: Number of spurious MCDI completion events allowed before we
- *	trigger a fatal error. Protected by @lock
- * @resprc: Returned MCDI completion
- * @resplen: Returned payload length
+ *	trigger a fatal error
+ * @resprc: Response error/success code (Linux numbering)
+ * @resp_hdr_len: Response header length
+ * @resp_data_len: Response data (SDU or error) length
 */
 struct efx_mcdi_iface {
     atomic_t state;
+    enum efx_mcdi_mode mode;
     wait_queue_head_t wq;
     spinlock_t iface_lock;
-    enum efx_mcdi_mode mode;
     unsigned int credits;
     unsigned int seqno;
-    unsigned int resprc;
-    size_t resplen;
+    int resprc;
+    size_t resp_hdr_len;
+    size_t resp_data_len;
 };
 
 struct efx_mcdi_mon {
@@ -65,15 +65,36 @@ struct efx_mcdi_mon {
     unsigned int n_attrs;
 };
 
+/**
+ * struct efx_mcdi_data - extra state for NICs that implement MCDI
+ * @iface: Interface/protocol state
+ * @hwmon: Hardware monitor state
+ */
+struct efx_mcdi_data {
+    struct efx_mcdi_iface iface;
+#ifdef CONFIG_SFC_MCDI_MON
+    struct efx_mcdi_mon hwmon;
+#endif
+};
+
+#ifdef CONFIG_SFC_MCDI_MON
+static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
+{
+    EFX_BUG_ON_PARANOID(!efx->mcdi);
+    return &efx->mcdi->hwmon;
+}
+#endif
+
 extern int efx_mcdi_init(struct efx_nic *efx);
+extern void efx_mcdi_fini(struct efx_nic *efx);
 
 extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
             const efx_dword_t *inbuf, size_t inlen,
             efx_dword_t *outbuf, size_t outlen,
             size_t *outlen_actual);
 
-extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
-                   const efx_dword_t *inbuf, size_t inlen);
+extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+                  const efx_dword_t *inbuf, size_t inlen);
 extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
                    efx_dword_t *outbuf, size_t outlen,
                    size_t *outlen_actual);
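Note (an annotation, not part of the commit): because MCDI state now hangs off efx->mcdi rather than the Siena-specific nic_data, "does this NIC implement MCDI?" becomes a pointer test instead of a hardware-revision check. The idiom used throughout the mcdi.c hunks above:

    if (!efx->mcdi)
        return;            /* NIC without MCDI */
    mcdi = efx_mcdi(efx);  /* i.e. &efx->mcdi->iface */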
@@ -261,7 +261,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
         return -EIO;
 
     rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf,
-                  4 * MC_CMD_SENSOR_ENTRY_MAXNUM);
+                  4 * MC_CMD_SENSOR_ENTRY_MAXNUM, GFP_KERNEL);
     if (rc)
         return rc;
 
[File diff suppressed because it is too large]
@@ -111,7 +111,8 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
     if (rc)
         goto fail;
 
-    if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
+    if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+              MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
         rc = -EIO;
         goto fail;
     }
@@ -989,7 +990,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
 
     /* Allocate buffer for stats */
     rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
-                  MC_CMD_MAC_NSTATS * sizeof(u64));
+                  MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
     if (rc)
         return rc;
     netif_dbg(efx, probe, efx->net_dev,
@@ -93,21 +93,36 @@ struct efx_ptp_data;
 struct efx_self_tests;
 
 /**
- * struct efx_special_buffer - An Efx special buffer
- * @addr: CPU base address of the buffer
+ * struct efx_buffer - A general-purpose DMA buffer
+ * @addr: host base address of the buffer
  * @dma_addr: DMA base address of the buffer
  * @len: Buffer length, in bytes
- * @index: Buffer index within controller's buffer table
- * @entries: Number of buffer table entries
  *
- * Special buffers are used for the event queues and the TX and RX
- * descriptor queues for each channel.  They are *not* used for the
- * actual transmit and receive buffers.
+ * The NIC uses these buffers for its interrupt status registers and
+ * MAC stats dumps.
  */
-struct efx_special_buffer {
+struct efx_buffer {
     void *addr;
     dma_addr_t dma_addr;
     unsigned int len;
+};
+
+/**
+ * struct efx_special_buffer - DMA buffer entered into buffer table
+ * @buf: Standard &struct efx_buffer
+ * @index: Buffer index within controller's buffer table
+ * @entries: Number of buffer table entries
+ *
+ * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
+ * Event and descriptor rings are addressed via one or more buffer
+ * table entries (and so can be physically non-contiguous, although we
+ * currently do not take advantage of that).  On Falcon and Siena we
+ * have to take care of allocating and initialising the entries
+ * ourselves.  On later hardware this is managed by the firmware and
+ * @index and @entries are left as 0.
+ */
+struct efx_special_buffer {
+    struct efx_buffer buf;
     unsigned int index;
     unsigned int entries;
 };
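Note (an annotation, not part of the commit): with struct efx_buffer split out and embedded in struct efx_special_buffer, the generic allocator also takes an explicit GFP argument; the call sites updated in this commit all follow the pattern from the falcon.c hunk above:

    rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
                              GFP_KERNEL);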
@@ -271,7 +286,7 @@ struct efx_rx_page_state {
  * @buffer: The software buffer ring
  * @rxd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
- * @enabled: Receive queue enabled indicator.
+ * @refill_enabled: Enable refill whenever fill level is low
  * @flush_pending: Set when a RX flush is pending. Has the same lifetime as
  *	@rxq_flush_pending.
  * @added_count: Number of buffers added to the receive queue.
@@ -302,7 +317,7 @@ struct efx_rx_queue {
     struct efx_rx_buffer *buffer;
     struct efx_special_buffer rxd;
     unsigned int ptr_mask;
-    bool enabled;
+    bool refill_enabled;
     bool flush_pending;
 
     unsigned int added_count;
@@ -325,22 +340,6 @@ struct efx_rx_queue {
     unsigned int slow_fill_count;
 };
 
-/**
- * struct efx_buffer - An Efx general-purpose buffer
- * @addr: host base address of the buffer
- * @dma_addr: DMA base address of the buffer
- * @len: Buffer length, in bytes
- *
- * The NIC uses these buffers for its interrupt status registers and
- * MAC stats dumps.
- */
-struct efx_buffer {
-    void *addr;
-    dma_addr_t dma_addr;
-    unsigned int len;
-};
-
-
 enum efx_rx_alloc_method {
     RX_ALLOC_METHOD_AUTO = 0,
     RX_ALLOC_METHOD_SKB = 1,
@@ -362,7 +361,6 @@ enum efx_rx_alloc_method {
  * @irq_moderation: IRQ moderation value (in hardware ticks)
  * @napi_dev: Net device used with NAPI
  * @napi_str: NAPI control structure
- * @work_pending: Is work pending via NAPI?
  * @eventq: Event queue buffer
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
@@ -394,7 +392,6 @@ struct efx_channel {
     unsigned int irq_moderation;
     struct net_device *napi_dev;
     struct napi_struct napi_str;
-    bool work_pending;
     struct efx_special_buffer eventq;
     unsigned int eventq_mask;
     unsigned int eventq_read_ptr;
@@ -422,6 +419,21 @@ struct efx_channel {
     struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
 };
 
+/**
+ * struct efx_msi_context - Context for each MSI
+ * @efx: The associated NIC
+ * @index: Index of the channel/IRQ
+ * @name: Name of the channel/IRQ
+ *
+ * Unlike &struct efx_channel, this is never reallocated and is always
+ * safe for the IRQ handler to access.
+ */
+struct efx_msi_context {
+    struct efx_nic *efx;
+    unsigned int index;
+    char name[IFNAMSIZ + 6];
+};
+
 /**
  * struct efx_channel_type - distinguishes traffic and extra channels
  * @handle_no_channel: Handle failure to allocate an extra channel
@@ -672,7 +684,6 @@ struct vfdi_status;
  * @pci_dev: The PCI device
  * @type: Controller type attributes
  * @legacy_irq: IRQ number
- * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
  * @workqueue: Workqueue for port reconfigures and the HW monitor.
  *	Work items do not hold and must not acquire RTNL.
  * @workqueue_name: Name of workqueue
@@ -689,7 +700,7 @@ struct vfdi_status;
  * @tx_queue: TX DMA queues
  * @rx_queue: RX DMA queues
  * @channel: Channels
- * @channel_name: Names for channels and their IRQs
+ * @msi_context: Context for each MSI
  * @extra_channel_types: Types of extra (non-traffic) channels that
  *	should be allocated for this NIC
  * @rxq_entries: Size of receive queues requested by user.
@@ -712,12 +723,15 @@ struct vfdi_status;
  * @rx_scatter: Scatter mode enabled for receives
  * @int_error_count: Number of internal errors seen recently
  * @int_error_expire: Time at which error count will be expired
+ * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
+ *	acknowledge but do nothing else.
  * @irq_status: Interrupt status buffer
  * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
  * @irq_level: IRQ level/index for IRQs not triggered by an event queue
  * @selftest_work: Work item for asynchronous self-test
  * @mtd_list: List of MTDs attached to the NIC
  * @nic_data: Hardware dependent state
+ * @mcdi: Management-Controller-to-Driver Interface state
  * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
  *	efx_monitor() and efx_reconfigure_port()
  * @port_enabled: Port enabled indicator.
@@ -788,7 +802,6 @@ struct efx_nic {
     unsigned int port_num;
     const struct efx_nic_type *type;
     int legacy_irq;
-    bool legacy_irq_enabled;
     bool eeh_disabled_legacy_irq;
     struct workqueue_struct *workqueue;
     char workqueue_name[16];
@@ -806,7 +819,7 @@ struct efx_nic {
     unsigned long reset_pending;
 
     struct efx_channel *channel[EFX_MAX_CHANNELS];
-    char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
+    struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
     const struct efx_channel_type *
     extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
@@ -837,6 +850,7 @@ struct efx_nic {
     unsigned int_error_count;
     unsigned long int_error_expire;
 
+    bool irq_soft_enabled;
     struct efx_buffer irq_status;
     unsigned irq_zero_count;
     unsigned irq_level;
@@ -847,6 +861,7 @@ struct efx_nic {
 #endif
 
     void *nic_data;
+    struct efx_mcdi_data *mcdi;
 
     struct mutex mac_lock;
     struct work_struct mac_work;
@@ -938,8 +953,11 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
  * @probe_port: Probe the MAC and PHY
  * @remove_port: Free resources allocated by probe_port()
  * @handle_global_event: Handle a "global" event (may be %NULL)
+ * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
  * @prepare_flush: Prepare the hardware for flushing the DMA queues
- * @finish_flush: Clean up after flushing the DMA queues
+ *	(for Falcon architecture)
+ * @finish_flush: Clean up after flushing the DMA queues (for Falcon
+ *	architecture)
  * @update_stats: Update statistics not provided by event handling
  * @start_stats: Start the regular fetching of statistics
  * @stop_stats: Stop the regular fetching of statistics
@ -953,9 +971,46 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
|
|||
* @get_wol: Get WoL configuration from driver state
|
||||
* @set_wol: Push WoL configuration to the NIC
|
||||
* @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
|
||||
* @test_chip: Test registers. Should use efx_nic_test_registers(), and is
|
||||
* @test_chip: Test registers. May use efx_farch_test_registers(), and is
|
||||
* expected to reset the NIC.
|
||||
* @test_nvram: Test validity of NVRAM contents
|
||||
* @mcdi_request: Send an MCDI request with the given header and SDU.
|
||||
* The SDU length may be any value from 0 up to the protocol-
|
||||
* defined maximum, but its buffer will be padded to a multiple
|
||||
* of 4 bytes.
|
||||
* @mcdi_poll_response: Test whether an MCDI response is available.
|
||||
* @mcdi_read_response: Read the MCDI response PDU. The offset will
|
||||
* be a multiple of 4. The length may not be, but the buffer
|
||||
* will be padded so it is safe to round up.
|
||||
* @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so,
|
||||
* return an appropriate error code for aborting any current
|
||||
* request; otherwise return 0.
|
||||
* @irq_enable_master: Enable IRQs on the NIC. Each event queue must
|
||||
* be separately enabled after this.
|
||||
* @irq_test_generate: Generate a test IRQ
|
||||
* @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event
|
||||
* queue must be separately disabled before this.
|
||||
* @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is
|
||||
* a pointer to the &struct efx_msi_context for the channel.
|
||||
* @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
|
||||
* is a pointer to the &struct efx_nic.
|
||||
* @tx_probe: Allocate resources for TX queue
|
||||
* @tx_init: Initialise TX queue on the NIC
|
||||
* @tx_remove: Free resources for TX queue
|
||||
* @tx_write: Write TX descriptors and doorbell
|
||||
* @rx_push_indir_table: Write RSS indirection table to the NIC
|
||||
* @rx_probe: Allocate resources for RX queue
|
||||
* @rx_init: Initialise RX queue on the NIC
|
||||
* @rx_remove: Free resources for RX queue
|
||||
* @rx_write: Write RX descriptors and doorbell
|
||||
* @rx_defer_refill: Generate a refill reminder event
|
||||
* @ev_probe: Allocate resources for event queue
|
||||
* @ev_init: Initialise event queue on the NIC
|
||||
* @ev_fini: Deinitialise event queue on the NIC
|
||||
* @ev_remove: Free resources for event queue
|
||||
* @ev_process: Process events for a queue, up to the given NAPI quota
|
||||
* @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
|
||||
* @ev_test_generate: Generate a test event
|
||||
* @revision: Hardware architecture revision
|
||||
* @mem_map_size: Memory BAR mapped size
|
||||
* @txd_ptr_tbl_base: TX descriptor ring base address
|
||||
|
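The kernel-doc above documents what is, after this series, a per-architecture method table: generic driver code calls through struct efx_nic_type pointers instead of fixed efx_nic_*()/efx_farch_*() symbols, so new hardware can plug in its own implementations. A minimal userspace sketch of that dispatch pattern (all fake_* names are invented for illustration; this is not driver code):

#include <stdio.h>

struct fake_nic;

/* Invented stand-in for struct efx_nic_type: per-architecture ops */
struct fake_nic_type {
	int (*tx_probe)(struct fake_nic *nic);
	void (*tx_write)(struct fake_nic *nic);
};

struct fake_nic {
	const struct fake_nic_type *type;
	const char *name;
};

static int fake_farch_tx_probe(struct fake_nic *nic)
{
	printf("%s: allocate TX ring\n", nic->name);
	return 0;
}

static void fake_farch_tx_write(struct fake_nic *nic)
{
	printf("%s: write descriptors, ring doorbell\n", nic->name);
}

static const struct fake_nic_type fake_farch_type = {
	.tx_probe = fake_farch_tx_probe,
	.tx_write = fake_farch_tx_write,
};

/* Generic wrapper in the style of the new static inlines in nic.h */
static inline void fake_nic_push_buffers(struct fake_nic *nic)
{
	nic->type->tx_write(nic);
}

int main(void)
{
	struct fake_nic nic = { .type = &fake_farch_type, .name = "siena0" };

	if (nic.type->tx_probe(&nic) == 0)
		fake_nic_push_buffers(&nic);
	return 0;
}

The payoff of this design choice is visible later in the diff: siena_a0_nic_type simply assigns the efx_farch_* implementations to these pointers.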
@@ -974,6 +1029,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
 * @timer_period_max: Maximum period of interrupt timer (in ticks)
 * @offload_features: net_device feature flags for protocol offload
 *	features implemented in hardware
 * @mcdi_max_ver: Maximum MCDI version supported
 */
struct efx_nic_type {
	int (*probe)(struct efx_nic *efx);
@@ -988,6 +1044,7 @@ struct efx_nic_type {
	int (*probe_port)(struct efx_nic *efx);
	void (*remove_port)(struct efx_nic *efx);
	bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
	int (*fini_dmaq)(struct efx_nic *efx);
	void (*prepare_flush)(struct efx_nic *efx);
	void (*finish_flush)(struct efx_nic *efx);
	void (*update_stats)(struct efx_nic *efx);
@@ -1004,6 +1061,35 @@ struct efx_nic_type {
	void (*resume_wol)(struct efx_nic *efx);
	int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
	int (*test_nvram)(struct efx_nic *efx);
	void (*mcdi_request)(struct efx_nic *efx,
			     const efx_dword_t *hdr, size_t hdr_len,
			     const efx_dword_t *sdu, size_t sdu_len);
	bool (*mcdi_poll_response)(struct efx_nic *efx);
	void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
				   size_t pdu_offset, size_t pdu_len);
	int (*mcdi_poll_reboot)(struct efx_nic *efx);
	void (*irq_enable_master)(struct efx_nic *efx);
	void (*irq_test_generate)(struct efx_nic *efx);
	void (*irq_disable_non_ev)(struct efx_nic *efx);
	irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
	irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
	int (*tx_probe)(struct efx_tx_queue *tx_queue);
	void (*tx_init)(struct efx_tx_queue *tx_queue);
	void (*tx_remove)(struct efx_tx_queue *tx_queue);
	void (*tx_write)(struct efx_tx_queue *tx_queue);
	void (*rx_push_indir_table)(struct efx_nic *efx);
	int (*rx_probe)(struct efx_rx_queue *rx_queue);
	void (*rx_init)(struct efx_rx_queue *rx_queue);
	void (*rx_remove)(struct efx_rx_queue *rx_queue);
	void (*rx_write)(struct efx_rx_queue *rx_queue);
	void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
	int (*ev_probe)(struct efx_channel *channel);
	void (*ev_init)(struct efx_channel *channel);
	void (*ev_fini)(struct efx_channel *channel);
	void (*ev_remove)(struct efx_channel *channel);
	int (*ev_process)(struct efx_channel *channel, int quota);
	void (*ev_read_ack)(struct efx_channel *channel);
	void (*ev_test_generate)(struct efx_channel *channel);

	int revision;
	unsigned int mem_map_size;
@@ -1020,6 +1106,7 @@ struct efx_nic_type {
	unsigned int phys_addr_channels;
	unsigned int timer_period_max;
	netdev_features_t offload_features;
	int mcdi_max_ver;
};

/**************************************************************************
File diff suppressed because it is too large
@@ -34,7 +34,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
	return efx->type->revision;
}

extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
extern u32 efx_farch_fpga_ver(struct efx_nic *efx);

/* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -42,6 +42,65 @@ static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
	return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.buf.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones. We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
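The comment above carries the key subtlety: the NIC's DMA write of an 8-byte event may become visible one 32-bit half at a time, so emptiness must be judged per dword. A standalone userspace sketch of the same check (fake_* names invented; logical || here is equivalent to the kernel's bitwise form):

#include <stdint.h>
#include <stdio.h>

union fake_qword {
	uint64_t u64;
	uint32_t dword[2];
};

/* Present only if neither dword still holds the "cleared" all-ones value */
static int fake_event_present(const union fake_qword *ev)
{
	return !(ev->dword[0] == 0xffffffffu || ev->dword[1] == 0xffffffffu);
}

int main(void)
{
	union fake_qword ev = { .u64 = ~0ULL };	/* freshly cleared slot */

	printf("cleared slot:  present=%d\n", fake_event_present(&ev));

	ev.dword[0] = 0x12345678;	/* half-visible "DMA" write */
	printf("half-written:  present=%d\n", fake_event_present(&ev));

	ev.dword[1] = 0x9abcdef0;	/* now fully written */
	printf("complete:      present=%d\n", fake_event_present(&ev));
	return 0;
}

A single 64-bit compare against ~0 would report the half-written event as present; the per-dword test only fires once both halves have landed.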

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
}

/* Decide whether to push a TX descriptor to the NIC vs merely writing
 * the doorbell. This can reduce latency when we are adding a single
 * descriptor to an empty queue, but is otherwise pointless. Further,
 * Falcon and Siena have hardware bugs (SF bug 33851) that may be
 * triggered if we don't check this.
 */
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
					    unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
		&& tx_queue->write_count - write_count == 1;
}
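In isolation, the decision reads: the XOR-and-mask asks whether the queue was last seen empty at exactly the caller's previous write count, and the final test requires that exactly one descriptor has been added since. A userspace sketch of that check (fake_* names and the VALID-bit value are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define FAKE_EMPTY_COUNT_VALID (1u << 31)	/* marks the count as meaningful */

struct fake_tx_queue {
	unsigned int write_count;	/* descriptors written so far */
	unsigned int empty_read_count;	/* count at which reader saw empty */
};

static bool fake_may_push(struct fake_tx_queue *q, unsigned int write_count)
{
	unsigned int empty = q->empty_read_count;

	if (empty == 0)
		return false;
	q->empty_read_count = 0;
	/* Queue was empty at exactly write_count, and one descriptor added */
	return ((empty ^ write_count) & ~FAKE_EMPTY_COUNT_VALID) == 0 &&
	       q->write_count - write_count == 1;
}

int main(void)
{
	struct fake_tx_queue q = {
		.write_count = 11,
		.empty_read_count = 10 | FAKE_EMPTY_COUNT_VALID,
	};

	/* One descriptor (10 -> 11) added to a queue seen empty at 10: push */
	printf("push? %d\n", fake_may_push(&q, 10));
	return 0;
}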

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
}

enum {
	PHY_TYPE_NONE = 0,
	PHY_TYPE_TXC43128 = 1,
@@ -140,28 +199,12 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)

/**
 * struct siena_nic_data - Siena NIC state
 * @mcdi: Management-Controller-to-Driver Interface
 * @wol_filter_id: Wake-on-LAN packet filter id
 * @hwmon: Hardware monitor state
 */
struct siena_nic_data {
	struct efx_mcdi_iface mcdi;
	int wol_filter_id;
#ifdef CONFIG_SFC_MCDI_MON
	struct efx_mcdi_mon hwmon;
#endif
};

#ifdef CONFIG_SFC_MCDI_MON
static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
{
	struct siena_nic_data *nic_data;
	EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
	nic_data = efx->nic_data;
	return &nic_data->hwmon;
}
#endif

/*
 * On the SFC9000 family each port is associated with 1 PCI physical
 * function (PF) handled by sfc and a configurable number of virtual
@@ -274,27 +317,93 @@ extern const struct efx_nic_type siena_a0_nic_type;
extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);

/* TX data path */
extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue);
extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue);
extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue);
extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue);
static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	return tx_queue->efx->type->tx_probe(tx_queue);
}
static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_init(tx_queue);
}
static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_remove(tx_queue);
}
static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_write(tx_queue);
}

/* RX data path */
extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue);
extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue);
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	return rx_queue->efx->type->rx_probe(rx_queue);
}
static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_init(rx_queue);
}
static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_remove(rx_queue);
}
static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_write(rx_queue);
}
static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_defer_refill(rx_queue);
}

/* Event data path */
extern int efx_nic_probe_eventq(struct efx_channel *channel);
extern void efx_nic_init_eventq(struct efx_channel *channel);
extern void efx_nic_fini_eventq(struct efx_channel *channel);
extern void efx_nic_remove_eventq(struct efx_channel *channel);
extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
static inline int efx_nic_probe_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_probe(channel);
}
static inline void efx_nic_init_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_init(channel);
}
static inline void efx_nic_fini_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_fini(channel);
}
static inline void efx_nic_remove_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_remove(channel);
}
static inline int
efx_nic_process_eventq(struct efx_channel *channel, int quota)
{
	return channel->efx->type->ev_process(channel, quota);
}
static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	channel->efx->type->ev_read_ack(channel);
}
extern void efx_nic_event_test_start(struct efx_channel *channel);

/* Falcon/Siena queue operations */
extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
extern int efx_farch_ev_probe(struct efx_channel *channel);
extern void efx_farch_ev_init(struct efx_channel *channel);
extern void efx_farch_ev_fini(struct efx_channel *channel);
extern void efx_farch_ev_remove(struct efx_channel *channel);
extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
extern void efx_farch_ev_read_ack(struct efx_channel *channel);
extern void efx_farch_ev_test_generate(struct efx_channel *channel);

extern bool efx_nic_event_present(struct efx_channel *channel);

/* Some statistics are computed as A - B where A and B each increase
@@ -315,16 +424,18 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
		*stat = diff;
}
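The comment here is truncated by the hunk boundary, but the visible body says the stored statistic only ever moves forward. A plausible userspace reconstruction of that clamp (the signed comparison is an assumption about the elided condition, chosen to tolerate counter wrap-around):

#include <stdint.h>
#include <stdio.h>

/* Only move the stored value forward, so a racing reader never sees
 * the statistic go backwards; the signed cast handles wrap-around.
 */
static void fake_update_diff_stat(uint64_t *stat, uint64_t diff)
{
	if ((int64_t)(diff - *stat) > 0)
		*stat = diff;
}

int main(void)
{
	uint64_t stat = 100;

	fake_update_diff_stat(&stat, 90);	/* stale reading: ignored */
	fake_update_diff_stat(&stat, 110);	/* newer reading: applied */
	printf("stat = %llu\n", (unsigned long long)stat);	/* prints 110 */
	return 0;
}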

/* Interrupts and test events */
/* Interrupts */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
extern void efx_nic_enable_interrupts(struct efx_nic *efx);
extern void efx_nic_event_test_start(struct efx_channel *channel);
extern void efx_nic_irq_test_start(struct efx_nic *efx);
extern void efx_nic_disable_interrupts(struct efx_nic *efx);
extern void efx_nic_fini_interrupt(struct efx_nic *efx);
extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
extern void falcon_irq_ack_a1(struct efx_nic *efx);

/* Falcon/Siena interrupts */
extern void efx_farch_irq_enable_master(struct efx_nic *efx);
extern void efx_farch_irq_test_generate(struct efx_nic *efx);
extern void efx_farch_irq_disable_master(struct efx_nic *efx);
extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);

static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
@@ -338,34 +449,38 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
/* Global Resources */
extern int efx_nic_flush_queues(struct efx_nic *efx);
extern void siena_prepare_flush(struct efx_nic *efx);
extern int efx_farch_fini_dmaq(struct efx_nic *efx);
extern void siena_finish_flush(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
extern void
efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
extern void efx_nic_init_common(struct efx_nic *efx);
extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
extern void efx_farch_init_common(struct efx_nic *efx);
static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	efx->type->rx_push_indir_table(efx);
}
extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len);
			 unsigned int len, gfp_t gfp_flags);
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
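efx_nic_alloc_buffer() now takes its allocation context from the caller: the call sites later in this diff pass GFP_KERNEL from probe paths, GFP_NOIO from reset work items and GFP_ATOMIC from the TX fast path. A userspace sketch of the idea (fake_* types invented; the flag here only documents the caller's context rather than reaching a real DMA allocator):

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for the kernel types, illustration only */
typedef enum { FAKE_GFP_KERNEL, FAKE_GFP_NOIO, FAKE_GFP_ATOMIC } fake_gfp_t;

struct fake_buffer {
	void *addr;
	unsigned int len;
};

static int fake_alloc_buffer(struct fake_buffer *buf, unsigned int len,
			     fake_gfp_t flags)
{
	/* A real implementation would hand flags to the allocator so it
	 * knows whether it may sleep or start I/O; here they are only
	 * recorded by the caller. */
	(void)flags;
	buf->addr = calloc(1, len);
	buf->len = len;
	return buf->addr ? 0 : -1;
}

int main(void)
{
	struct fake_buffer buf;

	if (fake_alloc_buffer(&buf, 4096, FAKE_GFP_ATOMIC) == 0) {
		printf("allocated %u bytes\n", buf.len);
		free(buf.addr);
	}
	return 0;
}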

/* Tests */
struct efx_nic_register_test {
struct efx_farch_register_test {
	unsigned address;
	efx_oword_t mask;
};
extern int efx_nic_test_registers(struct efx_nic *efx,
				  const struct efx_nic_register_test *regs,
				  size_t n_regs);
extern int efx_farch_test_registers(struct efx_nic *efx,
				    const struct efx_farch_register_test *regs,
				    size_t n_regs);

extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);

#define EFX_MAX_FLUSH_TIME 5000

extern void efx_generate_event(struct efx_nic *efx, unsigned int evq,
			       efx_qword_t *event);
extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
				     efx_qword_t *event);

#endif /* EFX_NIC_H */
@@ -46,7 +46,7 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "io.h"
#include "regs.h"
#include "farch_regs.h"
#include "nic.h"

/* Maximum number of events expected to make up a PTP event */
@@ -538,8 +538,9 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)

	/* Clear flag that signals MC ready */
	ACCESS_ONCE(*start) = 0;
	efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
			   MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
	rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
				MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
	EFX_BUG_ON_PARANOID(rc);

	/* Wait for start from MCDI (or timeout) */
	timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
@@ -875,7 +876,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
	if (!efx->ptp_data)
		return -ENOMEM;

	rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int));
	rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
	if (rc != 0)
		goto fail1;
@@ -326,6 +326,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
@@ -738,9 +741,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	rx_queue->enabled = true;
	efx_nic_init_rx(rx_queue);
}
@@ -753,11 +756,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* A flush failure might have left rx_queue->enabled */
	rx_queue->enabled = false;

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
@@ -447,14 +447,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
static int efx_poll_loopback(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_channel *channel;

	/* NAPI polling is not enabled, so process channels
	 * synchronously */
	efx_for_each_channel(channel, efx) {
		if (channel->work_pending)
			efx_process_channel_now(channel);
	}
	return atomic_read(&state->rx_good) == state->packet_count;
}

@@ -586,10 +579,6 @@ static int efx_wait_for_link(struct efx_nic *efx)
			mutex_lock(&efx->mac_lock);
			efx->type->monitor(efx);
			mutex_unlock(&efx->mac_lock);
		} else {
			struct efx_channel *channel = efx_get_channel(efx, 0);
			if (channel->work_pending)
				efx_process_channel_now(channel);
		}

		mutex_lock(&efx->mac_lock);
@@ -19,7 +19,7 @@
#include "efx.h"
#include "nic.h"
#include "spi.h"
#include "regs.h"
#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
@@ -63,7 +63,7 @@ void siena_finish_flush(struct efx_nic *efx)
		efx_mcdi_set_mac(efx);
}

static const struct efx_nic_register_test siena_register_tests[] = {
static const struct efx_farch_register_test siena_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_CZ_USR_EV_CFG,
@@ -107,8 +107,8 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
		goto out;

	tests->registers =
		efx_nic_test_registers(efx, siena_register_tests,
				       ARRAY_SIZE(siena_register_tests))
		efx_farch_test_registers(efx, siena_register_tests,
					 ARRAY_SIZE(siena_register_tests))
		? -1 : 1;

	rc = efx_mcdi_reset(efx, reset_method);
@@ -184,7 +184,7 @@ static void siena_dimension_resources(struct efx_nic *efx)
	 * the buffer table and descriptor caches. In theory we can
	 * map both blocks to one port, but we don't.
	 */
	efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
	efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
}

static int siena_probe_nic(struct efx_nic *efx)
@@ -200,7 +200,7 @@ static int siena_probe_nic(struct efx_nic *efx)
		return -ENOMEM;
	efx->nic_data = nic_data;

	if (efx_nic_fpga_ver(efx) != 0) {
	if (efx_farch_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Siena FPGA not supported\n");
		rc = -ENODEV;
@@ -237,7 +237,8 @@ static int siena_probe_nic(struct efx_nic *efx)
	siena_init_wol(efx);

	/* Allocate memory for INT_KER */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
				  GFP_KERNEL);
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -274,6 +275,7 @@ fail4:
fail3:
	efx_mcdi_drv_attach(efx, false, NULL);
fail2:
	efx_mcdi_fini(efx);
fail1:
	kfree(efx->nic_data);
	return rc;
@@ -349,7 +351,7 @@ static int siena_init_nic(struct efx_nic *efx)
	EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
	efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);

	efx_nic_init_common(efx);
	efx_farch_init_common(efx);
	return 0;
}

@@ -367,6 +369,8 @@ static void siena_remove_nic(struct efx_nic *efx)
	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;

	efx_mcdi_fini(efx);
}

static int siena_try_update_nic_stats(struct efx_nic *efx)
@@ -574,6 +578,89 @@ static void siena_init_wol(struct efx_nic *efx)
	}
}

/**************************************************************************
 *
 * MCDI
 *
 **************************************************************************
 */

#define MCDI_PDU(efx)							\
	(efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
#define MCDI_DOORBELL(efx)						\
	(efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
#define MCDI_STATUS(efx)						\
	(efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)

static void siena_mcdi_request(struct efx_nic *efx,
			       const efx_dword_t *hdr, size_t hdr_len,
			       const efx_dword_t *sdu, size_t sdu_len)
{
	unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
	unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
	unsigned int i;
	unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4);

	EFX_BUG_ON_PARANOID(hdr_len != 4);

	efx_writed(efx, hdr, pdu);

	for (i = 0; i < inlen_dw; i++)
		efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i);

	/* Ensure the request is written out before the doorbell */
	wmb();

	/* ring the doorbell with a distinctive value */
	_efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
}
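This copy loop is why the @mcdi_request kernel-doc earlier in this diff requires the SDU buffer to be padded to a multiple of 4 bytes: the implementation issues DIV_ROUND_UP(sdu_len, 4) whole-dword writes, so an unpadded buffer would be read past its end. A userspace sketch of the same arithmetic (fake_* helpers invented; the header is hardcoded to one dword as in the Siena case):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint32_t fake_smem[64];		/* stands in for the shared memory */

static void fake_writed(const uint32_t *value, unsigned int offset)
{
	fake_smem[offset / 4] = *value;
}

static void fake_mcdi_request(const uint32_t *hdr,
			      const uint32_t *sdu, size_t sdu_len)
{
	unsigned int i, inlen_dw = DIV_ROUND_UP(sdu_len, 4);

	fake_writed(hdr, 0);			/* one header dword */
	for (i = 0; i < inlen_dw; i++)		/* whole-dword SDU copy */
		fake_writed(&sdu[i], 4 + 4 * i);
	/* a real implementation would now ring the doorbell */
}

int main(void)
{
	uint32_t hdr = 0x1;
	uint32_t sdu[2] = { 0 };		/* padded to a dword multiple */

	memcpy(sdu, "hello", 5);		/* 5 bytes -> 2 dword writes */
	fake_mcdi_request(&hdr, sdu, 5);
	printf("dwords written: %u\n", (unsigned)DIV_ROUND_UP(5, 4));
	return 0;
}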

static bool siena_mcdi_poll_response(struct efx_nic *efx)
{
	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
	efx_dword_t hdr;

	efx_readd(efx, &hdr, pdu);

	/* All 1's indicates that shared memory is in reset (and is
	 * not a valid hdr). Wait for it to come out reset before
	 * completing the command
	 */
	return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff &&
	       EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
}

static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
				     size_t offset, size_t outlen)
{
	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
	unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4);
	int i;

	for (i = 0; i < outlen_dw; i++)
		efx_readd(efx, &outbuf[i], pdu + offset + 4 * i);
}

static int siena_mcdi_poll_reboot(struct efx_nic *efx)
{
	unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
	efx_dword_t reg;
	u32 value;

	efx_readd(efx, &reg, addr);
	value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);

	if (value == 0)
		return 0;

	EFX_ZERO_DWORD(reg);
	efx_writed(efx, &reg, addr);

	if (value == MC_STATUS_DWORD_ASSERT)
		return -EINTR;
	else
		return -EIO;
}
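The status-word protocol implemented by siena_mcdi_poll_reboot() is: zero means no reboot; any non-zero value is consumed (cleared back to zero) and mapped to an errno, with a firmware assertion distinguished from other reboots. A userspace sketch of that convention (FAKE_MC_STATUS_ASSERT is an invented value standing in for MC_STATUS_DWORD_ASSERT):

#include <errno.h>
#include <stdio.h>

#define FAKE_MC_STATUS_ASSERT 0xdeadbeef	/* invented sentinel */

static unsigned int fake_status_word = FAKE_MC_STATUS_ASSERT;

static int fake_poll_reboot(void)
{
	unsigned int value = fake_status_word;

	if (value == 0)
		return 0;
	fake_status_word = 0;			/* consume on read */
	return value == FAKE_MC_STATUS_ASSERT ? -EINTR : -EIO;
}

int main(void)
{
	printf("first poll:  %d\n", fake_poll_reboot());	/* -EINTR */
	printf("second poll: %d\n", fake_poll_reboot());	/* 0 */
	return 0;
}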
/**************************************************************************
|
||||
*
|
||||
|
@ -598,6 +685,7 @@ const struct efx_nic_type siena_a0_nic_type = {
|
|||
.reset = efx_mcdi_reset,
|
||||
.probe_port = efx_mcdi_port_probe,
|
||||
.remove_port = efx_mcdi_port_remove,
|
||||
.fini_dmaq = efx_farch_fini_dmaq,
|
||||
.prepare_flush = siena_prepare_flush,
|
||||
.finish_flush = siena_finish_flush,
|
||||
.update_stats = siena_update_nic_stats,
|
||||
|
@ -613,6 +701,32 @@ const struct efx_nic_type siena_a0_nic_type = {
|
|||
.resume_wol = siena_init_wol,
|
||||
.test_chip = siena_test_chip,
|
||||
.test_nvram = efx_mcdi_nvram_test_all,
|
||||
.mcdi_request = siena_mcdi_request,
|
||||
.mcdi_poll_response = siena_mcdi_poll_response,
|
||||
.mcdi_read_response = siena_mcdi_read_response,
|
||||
.mcdi_poll_reboot = siena_mcdi_poll_reboot,
|
||||
.irq_enable_master = efx_farch_irq_enable_master,
|
||||
.irq_test_generate = efx_farch_irq_test_generate,
|
||||
.irq_disable_non_ev = efx_farch_irq_disable_master,
|
||||
.irq_handle_msi = efx_farch_msi_interrupt,
|
||||
.irq_handle_legacy = efx_farch_legacy_interrupt,
|
||||
.tx_probe = efx_farch_tx_probe,
|
||||
.tx_init = efx_farch_tx_init,
|
||||
.tx_remove = efx_farch_tx_remove,
|
||||
.tx_write = efx_farch_tx_write,
|
||||
.rx_push_indir_table = efx_farch_rx_push_indir_table,
|
||||
.rx_probe = efx_farch_rx_probe,
|
||||
.rx_init = efx_farch_rx_init,
|
||||
.rx_remove = efx_farch_rx_remove,
|
||||
.rx_write = efx_farch_rx_write,
|
||||
.rx_defer_refill = efx_farch_rx_defer_refill,
|
||||
.ev_probe = efx_farch_ev_probe,
|
||||
.ev_init = efx_farch_ev_init,
|
||||
.ev_fini = efx_farch_ev_fini,
|
||||
.ev_remove = efx_farch_ev_remove,
|
||||
.ev_process = efx_farch_ev_process,
|
||||
.ev_read_ack = efx_farch_ev_read_ack,
|
||||
.ev_test_generate = efx_farch_ev_test_generate,
|
||||
|
||||
.revision = EFX_REV_SIENA_A0,
|
||||
.mem_map_size = (FR_CZ_MC_TREG_SMEM +
|
||||
|
@ -633,4 +747,5 @@ const struct efx_nic_type siena_a0_nic_type = {
|
|||
.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
|
||||
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_RXHASH | NETIF_F_NTUPLE),
|
||||
.mcdi_max_ver = 1,
|
||||
};
|
||||
|
|
|
@@ -15,7 +15,7 @@
#include "mcdi.h"
#include "filter.h"
#include "mcdi_pcol.h"
#include "regs.h"
#include "farch_regs.h"
#include "vfdi.h"

/* Number of longs required to track all the VIs in a VF */
@@ -464,8 +464,9 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
			     VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
			     VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
	++vf->msg_seqno;
	efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
			   &event);
	efx_farch_generate_event(efx,
				 EFX_VI_BASE + vf->index * efx_vf_size(efx),
				 &event);
}

static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
@@ -997,7 +998,7 @@ static void efx_sriov_reset_vf_work(struct work_struct *work)
	struct efx_nic *efx = vf->efx;
	struct efx_buffer buf;

	if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
	if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
		efx_sriov_reset_vf(vf, &buf);
		efx_nic_free_buffer(efx, &buf);
	}
@@ -1241,7 +1242,8 @@ static int efx_sriov_vfs_init(struct efx_nic *efx)
			 pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
			 PCI_SLOT(devfn), PCI_FUNC(devfn));

		rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
		rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
					  GFP_KERNEL);
		if (rc)
			goto fail;

@@ -1273,7 +1275,8 @@ int efx_sriov_init(struct efx_nic *efx)
	if (rc)
		goto fail_cmd;

	rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
	rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status),
				  GFP_KERNEL);
	if (rc)
		goto fail_status;
	vfdi_status = efx->vfdi_status.addr;
@@ -1528,7 +1531,7 @@ void efx_sriov_reset(struct efx_nic *efx)
	efx_sriov_usrev(efx, true);
	(void)efx_sriov_cmd(efx, true, NULL, NULL);

	if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
	if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
		return;

	for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
@@ -543,10 +543,13 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
	tx_queue->initialised = true;
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

@@ -561,22 +564,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->initialised)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	tx_queue->initialised = false;

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;
@@ -708,7 +695,8 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
		TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;

	if (unlikely(!page_buf->addr) &&
	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;

	result = (u8 *)page_buf->addr + offset;