Merge branch 'sfc-prerequisites-for-EF100-driver-part-2'
Edward Cree says:

====================
sfc: prerequisites for EF100 driver, part 2

Continuing on from [1], this series further prepares the sfc codebase
for the introduction of the EF100 driver.

[1]: https://lore.kernel.org/netdev/20200629.173812.1532344417590172093.davem@davemloft.net/T/
====================

Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a37675899c
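
For orientation (not part of the diff): the handlers this series moves into common code keep the exact prototypes the net core expects, so a future netdev such as EF100's can point its ops table straight at them. A minimal sketch, using only functions this series exports; the table itself and its name are hypothetical:

/* Hypothetical ops table built from handlers made common by this
 * series (the names match the declarations added to efx_common.h
 * below; the table is illustrative, not from the diff).
 */
static const struct net_device_ops example_netdev_ops = {
        .ndo_set_mac_address    = efx_set_mac_address,
        .ndo_set_rx_mode        = efx_set_rx_mode,
        .ndo_set_features       = efx_set_features,
        .ndo_tx_timeout         = efx_watchdog,
        .ndo_get_phys_port_id   = efx_get_phys_port_id,
        .ndo_get_phys_port_name = efx_get_phys_port_name,
};
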
@@ -10,6 +10,7 @@
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mcdi_port.h"
#include "mcdi_port_common.h"
#include "mcdi_functions.h"
#include "nic.h"

@@ -553,8 +554,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
        efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;

        efx->vport_id = EVB_PORT_ID_ASSIGNED;

        /* In case we're recovering from a crash (kexec), we want to
         * cancel any outstanding request by the previous user of this
         * function. We send a special message using the least

@ -385,7 +385,6 @@ static int efx_probe_all(struct efx_nic *efx)
|
|||
rc = -EINVAL;
|
||||
goto fail3;
|
||||
}
|
||||
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
|
||||
|
||||
#ifdef CONFIG_SFC_SRIOV
|
||||
rc = efx->type->vswitching_probe(efx);
|
||||
|
@ -593,109 +592,6 @@ int efx_net_stop(struct net_device *net_dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Context: netif_tx_lock held, BHs disabled. */
|
||||
static void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
||||
netif_err(efx, tx_err, efx->net_dev,
|
||||
"TX stuck with port_enabled=%d: resetting channels\n",
|
||||
efx->port_enabled);
|
||||
|
||||
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
|
||||
}
|
||||
|
||||
static int efx_set_mac_address(struct net_device *net_dev, void *data)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
struct sockaddr *addr = data;
|
||||
u8 *new_addr = addr->sa_data;
|
||||
u8 old_addr[6];
|
||||
int rc;
|
||||
|
||||
if (!is_valid_ether_addr(new_addr)) {
|
||||
netif_err(efx, drv, efx->net_dev,
|
||||
"invalid ethernet MAC address requested: %pM\n",
|
||||
new_addr);
|
||||
return -EADDRNOTAVAIL;
|
||||
}
|
||||
|
||||
/* save old address */
|
||||
ether_addr_copy(old_addr, net_dev->dev_addr);
|
||||
ether_addr_copy(net_dev->dev_addr, new_addr);
|
||||
if (efx->type->set_mac_address) {
|
||||
rc = efx->type->set_mac_address(efx);
|
||||
if (rc) {
|
||||
ether_addr_copy(net_dev->dev_addr, old_addr);
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
/* Reconfigure the MAC */
|
||||
mutex_lock(&efx->mac_lock);
|
||||
efx_mac_reconfigure(efx);
|
||||
mutex_unlock(&efx->mac_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Context: netif_addr_lock held, BHs disabled. */
|
||||
static void efx_set_rx_mode(struct net_device *net_dev)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
||||
if (efx->port_enabled)
|
||||
queue_work(efx->workqueue, &efx->mac_work);
|
||||
/* Otherwise efx_start_port() will do this */
|
||||
}
|
||||
|
||||
static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
int rc;
|
||||
|
||||
/* If disabling RX n-tuple filtering, clear existing filters */
|
||||
if (net_dev->features & ~data & NETIF_F_NTUPLE) {
|
||||
rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* If Rx VLAN filter is changed, update filters via mac_reconfigure.
|
||||
* If rx-fcs is changed, mac_reconfigure updates that too.
|
||||
*/
|
||||
if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
|
||||
NETIF_F_RXFCS)) {
|
||||
/* efx_set_rx_mode() will schedule MAC work to update filters
|
||||
* when a new features are finally set in net_dev.
|
||||
*/
|
||||
efx_set_rx_mode(net_dev);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int efx_get_phys_port_id(struct net_device *net_dev,
|
||||
struct netdev_phys_item_id *ppid)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
||||
if (efx->type->get_phys_port_id)
|
||||
return efx->type->get_phys_port_id(efx, ppid);
|
||||
else
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static int efx_get_phys_port_name(struct net_device *net_dev,
|
||||
char *name, size_t len)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
||||
if (snprintf(name, len, "p%u", efx->port_num) >= len)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
|
|
@@ -36,13 +36,6 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
                __efx_rx_packet(channel);
}

#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
#define EFX_MIN_DMAQ_SIZE 512UL

#define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL

/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS 100

@@ -166,10 +159,6 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
                            unsigned int *rx_usecs, bool *rx_adaptive);

/* Dummy PHY ops for PHY drivers */
int efx_port_dummy_op_int(struct efx_nic *efx);
void efx_port_dummy_op_void(struct efx_nic *efx);

/* Update the generic software stats in the passed stats array */
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);

@@ -196,21 +185,6 @@ static inline unsigned int efx_vf_size(struct efx_nic *efx)
}
#endif

static inline void efx_schedule_channel(struct efx_channel *channel)
{
        netif_vdbg(channel->efx, intr, channel->efx->net_dev,
                   "channel %d scheduling NAPI poll on CPU%d\n",
                   channel->channel, raw_smp_processor_id());

        napi_schedule(&channel->napi_str);
}

static inline void efx_schedule_channel_irq(struct efx_channel *channel)
{
        channel->event_test_cpu = raw_smp_processor_id();
        efx_schedule_channel(channel);
}

static inline void efx_device_detach_sync(struct efx_nic *efx)
{
        struct net_device *dev = efx->net_dev;

@@ -566,6 +566,9 @@ int efx_init_channels(struct efx_nic *efx)
        efx->interrupt_mode = min(efx->type->min_interrupt_mode,
                                  interrupt_mode);

        efx->max_channels = EFX_MAX_CHANNELS;
        efx->max_tx_channels = EFX_MAX_CHANNELS;

        return 0;
}

@@ -162,6 +162,76 @@ static void efx_mac_work(struct work_struct *data)
        mutex_unlock(&efx->mac_lock);
}

int efx_set_mac_address(struct net_device *net_dev, void *data)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct sockaddr *addr = data;
        u8 *new_addr = addr->sa_data;
        u8 old_addr[6];
        int rc;

        if (!is_valid_ether_addr(new_addr)) {
                netif_err(efx, drv, efx->net_dev,
                          "invalid ethernet MAC address requested: %pM\n",
                          new_addr);
                return -EADDRNOTAVAIL;
        }

        /* save old address */
        ether_addr_copy(old_addr, net_dev->dev_addr);
        ether_addr_copy(net_dev->dev_addr, new_addr);
        if (efx->type->set_mac_address) {
                rc = efx->type->set_mac_address(efx);
                if (rc) {
                        ether_addr_copy(net_dev->dev_addr, old_addr);
                        return rc;
                }
        }

        /* Reconfigure the MAC */
        mutex_lock(&efx->mac_lock);
        efx_mac_reconfigure(efx);
        mutex_unlock(&efx->mac_lock);

        return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
void efx_set_rx_mode(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        if (efx->port_enabled)
                queue_work(efx->workqueue, &efx->mac_work);
        /* Otherwise efx_start_port() will do this */
}

int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        int rc;

        /* If disabling RX n-tuple filtering, clear existing filters */
        if (net_dev->features & ~data & NETIF_F_NTUPLE) {
                rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
                if (rc)
                        return rc;
        }

        /* If Rx VLAN filter is changed, update filters via mac_reconfigure.
         * If rx-fcs is changed, mac_reconfigure updates that too.
         */
        if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
                                          NETIF_F_RXFCS)) {
                /* efx_set_rx_mode() will schedule MAC work to update filters
                 * when a new features are finally set in net_dev.
                 */
                efx_set_rx_mode(net_dev);
        }

        return 0;
}

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.

@@ -650,6 +720,18 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
        efx->type->fini(efx);
}

/* Context: netif_tx_lock held, BHs disabled. */
void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        netif_err(efx, tx_err, efx->net_dev,
                  "TX stuck with port_enabled=%d: resetting channels\n",
                  efx->port_enabled);

        efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the

@@ -936,6 +1018,7 @@ int efx_init_struct(struct efx_nic *efx,
                efx->type->rx_ts_offset - efx->type->rx_prefix_size;
        INIT_LIST_HEAD(&efx->rss_context.list);
        mutex_init(&efx->rss_lock);
        efx->vport_id = EVB_PORT_ID_ASSIGNED;
        spin_lock_init(&efx->stats_lock);
        efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
        efx->num_mac_stats = MC_CMD_MAC_NSTATS;

@@ -953,6 +1036,9 @@ int efx_init_struct(struct efx_nic *efx,
        INIT_WORK(&efx->mac_work, efx_mac_work);
        init_waitqueue_head(&efx->flush_wq);

        efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
        efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;

        efx->mem_bar = UINT_MAX;

        rc = efx_init_channels(efx);

@@ -1221,3 +1307,23 @@ const struct pci_error_handlers efx_err_handlers = {
        .slot_reset = efx_io_slot_reset,
        .resume = efx_io_resume,
};

int efx_get_phys_port_id(struct net_device *net_dev,
                         struct netdev_phys_item_id *ppid)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        if (efx->type->get_phys_port_id)
                return efx->type->get_phys_port_id(efx, ppid);
        else
                return -EOPNOTSUPP;
}

int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        if (snprintf(name, len, "p%u", efx->port_num) >= len)
                return -EINVAL;
        return 0;
}

@@ -18,6 +18,13 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
                    struct net_device *net_dev);
void efx_fini_struct(struct efx_nic *efx);

#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
#define EFX_MIN_DMAQ_SIZE 512UL

#define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL

void efx_link_clear_advertising(struct efx_nic *efx);
void efx_link_set_wanted_fc(struct efx_nic *efx, u8);

@@ -46,10 +53,15 @@ int efx_reconfigure_port(struct efx_nic *efx);

int efx_try_recovery(struct efx_nic *efx);
void efx_reset_down(struct efx_nic *efx, enum reset_type method);
void efx_watchdog(struct net_device *net_dev, unsigned int txqueue);
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
int efx_reset(struct efx_nic *efx, enum reset_type method);
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);

/* Dummy PHY ops for PHY drivers */
int efx_port_dummy_op_int(struct efx_nic *efx);
void efx_port_dummy_op_void(struct efx_nic *efx);

static inline int efx_check_disabled(struct efx_nic *efx)
{
        if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {

@@ -60,6 +72,21 @@ static inline int efx_check_disabled(struct efx_nic *efx)
        return 0;
}

static inline void efx_schedule_channel(struct efx_channel *channel)
{
        netif_vdbg(channel->efx, intr, channel->efx->net_dev,
                   "channel %d scheduling NAPI poll on CPU%d\n",
                   channel->channel, raw_smp_processor_id());

        napi_schedule(&channel->napi_str);
}

static inline void efx_schedule_channel_irq(struct efx_channel *channel)
{
        channel->event_test_cpu = raw_smp_processor_id();
        efx_schedule_channel(channel);
}

#ifdef CONFIG_SFC_MCDI_LOGGING
void efx_init_mcdi_logging(struct efx_nic *efx);
void efx_fini_mcdi_logging(struct efx_nic *efx);

@@ -69,9 +96,18 @@ static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {}
#endif

void efx_mac_reconfigure(struct efx_nic *efx);
int efx_set_mac_address(struct net_device *net_dev, void *data);
void efx_set_rx_mode(struct net_device *net_dev);
int efx_set_features(struct net_device *net_dev, netdev_features_t data);
void efx_link_status_changed(struct efx_nic *efx);
unsigned int efx_xdp_max_mtu(struct efx_nic *efx);
int efx_change_mtu(struct net_device *net_dev, int new_mtu);

extern const struct pci_error_handlers efx_err_handlers;

int efx_get_phys_port_id(struct net_device *net_dev,
                         struct netdev_phys_item_id *ppid);

int efx_get_phys_port_name(struct net_device *net_dev,
                           char *name, size_t len);
#endif

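With the ring-size bounds now visible from efx_common.h, common code can sanity-check ring sizes without pulling in efx.h. A minimal sketch of the kind of check this enables, assuming a hypothetical helper name (the driver's ethtool ringparam path performs an equivalent test):

#include <linux/log2.h>  /* is_power_of_2() */

/* Hypothetical helper: a DMA queue size must be a power of two within
 * the bounds exported by efx_common.h above.
 */
static inline bool example_dmaq_size_ok(unsigned long entries)
{
        return is_power_of_2(entries) &&
               entries >= EFX_MIN_DMAQ_SIZE &&
               entries <= EFX_MAX_DMAQ_SIZE;
}
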
@ -354,15 +354,11 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
|
|||
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
|
||||
int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
|
||||
int efx_mcdi_flush_rxqs(struct efx_nic *efx);
|
||||
int efx_mcdi_port_probe(struct efx_nic *efx);
|
||||
void efx_mcdi_port_remove(struct efx_nic *efx);
|
||||
int efx_mcdi_port_reconfigure(struct efx_nic *efx);
|
||||
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
|
||||
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
|
||||
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
|
||||
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
|
||||
void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
|
||||
bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
|
||||
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
|
||||
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
|
||||
int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
|
||||
|
|
|
@@ -1,3 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2018 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "mcdi_filters.h"
#include "mcdi.h"
#include "nic.h"

@@ -10,6 +10,7 @@

#include <linux/slab.h>
#include "efx.h"
#include "mcdi_port.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"

@@ -175,19 +176,6 @@ fail:
        return rc;
}

int efx_mcdi_port_reconfigure(struct efx_nic *efx)
{
        struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
        u32 caps = (efx->link_advertising[0] ?
                    ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
                    phy_cfg->forced_cap);

        caps |= ethtool_fec_caps_to_mcdi(efx->fec_config);

        return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
                                 efx->loopback_mode, 0);
}

static void efx_mcdi_phy_remove(struct efx_nic *efx)
{
        struct efx_mcdi_phy_data *phy_data = efx->phy_data;

@@ -691,80 +679,6 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
        return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
}

enum efx_stats_action {
        EFX_STATS_ENABLE,
        EFX_STATS_DISABLE,
        EFX_STATS_PULL,
};

static int efx_mcdi_mac_stats(struct efx_nic *efx,
                              enum efx_stats_action action, int clear)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
        int rc;
        int change = action == EFX_STATS_PULL ? 0 : 1;
        int enable = action == EFX_STATS_ENABLE ? 1 : 0;
        int period = action == EFX_STATS_ENABLE ? 1000 : 0;
        dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
        u32 dma_len = action != EFX_STATS_DISABLE ?
                      efx->num_mac_stats * sizeof(u64) : 0;

        BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);

        MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
        MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
                              MAC_STATS_IN_DMA, !!enable,
                              MAC_STATS_IN_CLEAR, clear,
                              MAC_STATS_IN_PERIODIC_CHANGE, change,
                              MAC_STATS_IN_PERIODIC_ENABLE, enable,
                              MAC_STATS_IN_PERIODIC_CLEAR, 0,
                              MAC_STATS_IN_PERIODIC_NOEVENT, 1,
                              MAC_STATS_IN_PERIOD_MS, period);
        MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);

        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id);

        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
                                NULL, 0, NULL);
        /* Expect ENOENT if DMA queues have not been set up */
        if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues)))
                efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf),
                                       NULL, 0, rc);
        return rc;
}

void efx_mcdi_mac_start_stats(struct efx_nic *efx)
{
        __le64 *dma_stats = efx->stats_buffer.addr;

        dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;

        efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
}

void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
{
        efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0);
}

#define EFX_MAC_STATS_WAIT_US 100
#define EFX_MAC_STATS_WAIT_ATTEMPTS 10

void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
{
        __le64 *dma_stats = efx->stats_buffer.addr;
        int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;

        dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
        efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);

        while (dma_stats[efx->num_mac_stats - 1] ==
               EFX_MC_STATS_GENERATION_INVALID &&
               attempts-- != 0)
                udelay(EFX_MAC_STATS_WAIT_US);
}

int efx_mcdi_port_probe(struct efx_nic *efx)
{
        int rc;

@@ -782,24 +696,11 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
        if (rc != 0)
                return rc;

        /* Allocate buffer for stats */
        rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
                                  efx->num_mac_stats * sizeof(u64), GFP_KERNEL);
        if (rc)
                return rc;
        netif_dbg(efx, probe, efx->net_dev,
                  "stats buffer at %llx (virt %p phys %llx)\n",
                  (u64)efx->stats_buffer.dma_addr,
                  efx->stats_buffer.addr,
                  (u64)virt_to_phys(efx->stats_buffer.addr));

        efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 1);

        return 0;
        return efx_mcdi_mac_init_stats(efx);
}

void efx_mcdi_port_remove(struct efx_nic *efx)
{
        efx->phy_op->remove(efx);
        efx_nic_free_buffer(efx, &efx->stats_buffer);
        efx_mcdi_mac_fini_stats(efx);
}

@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2008-2013 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 */

#ifndef EFX_MCDI_PORT_H
#define EFX_MCDI_PORT_H

#include "net_driver.h"

u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
int efx_mcdi_port_probe(struct efx_nic *efx);
void efx_mcdi_port_remove(struct efx_nic *efx);

#endif /* EFX_MCDI_PORT_H */

@@ -10,6 +10,7 @@

#include "mcdi_port_common.h"
#include "efx_common.h"
#include "nic.h"

int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{

@@ -475,6 +476,24 @@ int efx_mcdi_phy_test_alive(struct efx_nic *efx)
        return 0;
}

int efx_mcdi_port_reconfigure(struct efx_nic *efx)
{
        struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
        u32 caps = (efx->link_advertising[0] ?
                    ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
                    phy_cfg->forced_cap);

        caps |= ethtool_fec_caps_to_mcdi(efx->fec_config);

        return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
                                 efx->loopback_mode, 0);
}

static unsigned int efx_calc_mac_mtu(struct efx_nic *efx)
{
        return EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
}

int efx_mcdi_set_mac(struct efx_nic *efx)
{
        u32 fcntl;

@@ -486,8 +505,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
        ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
                        efx->net_dev->dev_addr);

        MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
                       EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
        MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, efx_calc_mac_mtu(efx));
        MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);

        /* Set simple MAC filter for Siena */

@@ -520,6 +538,125 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
                            NULL, 0, NULL);
}

int efx_mcdi_set_mtu(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN);

        BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);

        MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_MTU, efx_calc_mac_mtu(efx));

        MCDI_POPULATE_DWORD_1(inbuf, SET_MAC_EXT_IN_CONTROL,
                              SET_MAC_EXT_IN_CFG_MTU, 1);

        return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}

enum efx_stats_action {
        EFX_STATS_ENABLE,
        EFX_STATS_DISABLE,
        EFX_STATS_PULL,
};

static int efx_mcdi_mac_stats(struct efx_nic *efx,
                              enum efx_stats_action action, int clear)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
        int rc;
        int change = action == EFX_STATS_PULL ? 0 : 1;
        int enable = action == EFX_STATS_ENABLE ? 1 : 0;
        int period = action == EFX_STATS_ENABLE ? 1000 : 0;
        dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
        u32 dma_len = action != EFX_STATS_DISABLE ?
                      efx->num_mac_stats * sizeof(u64) : 0;

        BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);

        MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
        MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
                              MAC_STATS_IN_DMA, !!enable,
                              MAC_STATS_IN_CLEAR, clear,
                              MAC_STATS_IN_PERIODIC_CHANGE, change,
                              MAC_STATS_IN_PERIODIC_ENABLE, enable,
                              MAC_STATS_IN_PERIODIC_CLEAR, 0,
                              MAC_STATS_IN_PERIODIC_NOEVENT, 1,
                              MAC_STATS_IN_PERIOD_MS, period);
        MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);

        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id);

        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
                                NULL, 0, NULL);
        /* Expect ENOENT if DMA queues have not been set up */
        if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues)))
                efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf),
                                       NULL, 0, rc);
        return rc;
}

void efx_mcdi_mac_start_stats(struct efx_nic *efx)
{
        __le64 *dma_stats = efx->stats_buffer.addr;

        dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;

        efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
}

void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
{
        efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0);
}

#define EFX_MAC_STATS_WAIT_US 100
#define EFX_MAC_STATS_WAIT_ATTEMPTS 10

void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
{
        __le64 *dma_stats = efx->stats_buffer.addr;
        int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;

        dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
        efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);

        while (dma_stats[efx->num_mac_stats - 1] ==
               EFX_MC_STATS_GENERATION_INVALID &&
               attempts-- != 0)
                udelay(EFX_MAC_STATS_WAIT_US);
}

int efx_mcdi_mac_init_stats(struct efx_nic *efx)
{
        int rc;

        if (!efx->num_mac_stats)
                return 0;

        /* Allocate buffer for stats */
        rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
                                  efx->num_mac_stats * sizeof(u64), GFP_KERNEL);
        if (rc) {
                netif_warn(efx, probe, efx->net_dev,
                           "failed to allocate DMA buffer: %d\n", rc);
                return rc;
        }

        netif_dbg(efx, probe, efx->net_dev,
                  "stats buffer at %llx (virt %p phys %llx)\n",
                  (u64) efx->stats_buffer.dma_addr,
                  efx->stats_buffer.addr,
                  (u64) virt_to_phys(efx->stats_buffer.addr));

        return 0;
}

void efx_mcdi_mac_fini_stats(struct efx_nic *efx)
{
        efx_nic_free_buffer(efx, &efx->stats_buffer);
}

/* Get physical port number (EF10 only; on Siena it is same as PF number) */
int efx_mcdi_port_get_number(struct efx_nic *efx)
{

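Two small points about the additions above: efx_mcdi_set_mtu() issues MC_CMD_SET_MAC with only the CFG_MTU control bit set, so an MTU change no longer rewrites the MAC address or flow-control settings; and the stats-buffer allocation is split into efx_mcdi_mac_init_stats()/efx_mcdi_mac_fini_stats() so callers other than efx_mcdi_port_probe() can reuse it. A sketch of an MTU-change caller, with a hypothetical function name and simplified error handling:

/* Hypothetical MTU-change path: efx_calc_mac_mtu() reads net_dev->mtu,
 * so update that first, then push only the MTU to the MC.
 */
static int example_change_mtu(struct efx_nic *efx, int new_mtu)
{
        efx->net_dev->mtu = new_mtu;
        return efx_mcdi_set_mtu(efx);   /* MC_CMD_SET_MAC, CFG_MTU bit only */
}
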
@ -28,8 +28,6 @@ struct efx_mcdi_phy_data {
|
|||
u32 forced_cap;
|
||||
};
|
||||
|
||||
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
|
||||
|
||||
int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg);
|
||||
void efx_link_set_advertising(struct efx_nic *efx,
|
||||
const unsigned long *advertising);
|
||||
|
@ -51,6 +49,9 @@ int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
|
|||
struct ethtool_fecparam *fec);
|
||||
int efx_mcdi_phy_test_alive(struct efx_nic *efx);
|
||||
int efx_mcdi_set_mac(struct efx_nic *efx);
|
||||
int efx_mcdi_set_mtu(struct efx_nic *efx);
|
||||
int efx_mcdi_mac_init_stats(struct efx_nic *efx);
|
||||
void efx_mcdi_mac_fini_stats(struct efx_nic *efx);
|
||||
int efx_mcdi_port_get_number(struct efx_nic *efx);
|
||||
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
|
||||
|
||||
|
|
|
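Taken together, the stats helpers now separate the buffer's lifetime from its use. A plausible pairing, illustrative only (the surrounding function is hypothetical; the individual helpers are the ones this series provides):

/* Hypothetical lifetime sketch: allocate the stats DMA buffer once,
 * run periodic DMA while the interface is up, free it on teardown.
 */
static int example_stats_lifetime(struct efx_nic *efx)
{
        int rc = efx_mcdi_mac_init_stats(efx);  /* allocates efx->stats_buffer */

        if (rc)
                return rc;
        efx_mcdi_mac_start_stats(efx);  /* MC DMAs stats every 1000 ms */
        efx_mcdi_mac_pull_stats(efx);   /* one-shot synchronous refresh */
        efx_mcdi_mac_stop_stats(efx);
        efx_mcdi_mac_fini_stats(efx);   /* frees the buffer */
        return 0;
}
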
@ -20,7 +20,6 @@
|
|||
#include "farch_regs.h"
|
||||
#include "io.h"
|
||||
#include "workarounds.h"
|
||||
#include "mcdi_port_common.h"
|
||||
#include "mcdi_pcol.h"
|
||||
|
||||
/**************************************************************************
|
||||
|
|
|
@@ -306,9 +306,6 @@ extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;

int falcon_probe_board(struct efx_nic *efx, u16 revision_info);

int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                        bool *data_mapped);

/* Falcon/Siena queue operations */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
void efx_farch_tx_init(struct efx_tx_queue *tx_queue);

@@ -110,6 +110,9 @@ static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
               efx_nic_tx_is_empty(partner);
}

int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                        bool *data_mapped);

/* Decide whether to push a TX descriptor to the NIC vs merely writing
 * the doorbell. This can reduce latency when we are adding a single
 * descriptor to an empty queue, but is otherwise pointless. Further,

@@ -160,7 +163,8 @@ static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
}
static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
        tx_queue->efx->type->tx_remove(tx_queue);
        if (tx_queue->efx->type->tx_remove)
                tx_queue->efx->type->tx_remove(tx_queue);
}
static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{

@@ -260,6 +264,8 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
size_t efx_nic_get_regs_len(struct efx_nic *efx);
void efx_nic_get_regs(struct efx_nic *efx, void *buf);

#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))

size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
                              const unsigned long *mask, u8 *names);
int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest);

@ -40,14 +40,6 @@
|
|||
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
|
||||
EFX_RX_USR_BUF_SIZE)
|
||||
|
||||
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
|
||||
struct efx_rx_buffer *rx_buf,
|
||||
unsigned int len)
|
||||
{
|
||||
dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
|
||||
DMA_FROM_DEVICE);
|
||||
}
|
||||
|
||||
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
|
||||
struct efx_rx_buffer *rx_buf,
|
||||
int len)
|
||||
|
|
|
@@ -57,6 +57,15 @@ void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
                        unsigned int page_offset,
                        u16 flags);
void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf);

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
                                      struct efx_rx_buffer *rx_buf,
                                      unsigned int len)
{
        dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
                                DMA_FROM_DEVICE);
}

void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
                         struct efx_rx_buffer *rx_buf,
                         unsigned int num_bufs);

@@ -21,6 +21,7 @@
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mcdi_port.h"
#include "mcdi_port_common.h"
#include "selftest.h"
#include "siena_sriov.h"

@@ -268,34 +268,6 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
}
#endif /* EFX_USE_PIO */

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct sk_buff *segments, *next;

        segments = skb_gso_segment(skb, 0);
        if (IS_ERR(segments))
                return PTR_ERR(segments);

        dev_consume_skb_any(skb);

        skb_list_walk_safe(segments, skb, next) {
                skb_mark_not_on_list(skb);
                efx_enqueue_skb(tx_queue, skb);
        }

        return 0;
}

/*
 * Add a socket buffer to a TX queue
 *

@@ -18,7 +18,4 @@ unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
                                   struct efx_tx_buffer *buffer, size_t len);

int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                        bool *data_mapped);

#endif /* EFX_TX_H */

@@ -10,7 +10,7 @@

#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "nic_common.h"
#include "tx_common.h"

static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)

@@ -311,6 +311,20 @@ struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
        return buffer;
}

int efx_tx_tso_header_length(struct sk_buff *skb)
{
        size_t header_len;

        if (skb->encapsulation)
                header_len = skb_inner_transport_header(skb) -
                             skb->data +
                             (inner_tcp_hdr(skb)->doff << 2u);
        else
                header_len = skb_transport_header(skb) - skb->data +
                             (tcp_hdr(skb)->doff << 2u);
        return header_len;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                    unsigned int segment_count)

@@ -339,8 +353,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                /* For TSO we need to put the header in to a separate
                 * descriptor. Map this separately if necessary.
                 */
                size_t header_len = skb_transport_header(skb) - skb->data +
                                    (tcp_hdr(skb)->doff << 2u);
                size_t header_len = efx_tx_tso_header_length(skb);

                if (header_len != len) {
                        tx_queue->tso_long_headers++;

@@ -405,3 +418,30 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)

        return max_descs;
}

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        struct sk_buff *segments, *next;

        segments = skb_gso_segment(skb, 0);
        if (IS_ERR(segments))
                return PTR_ERR(segments);

        dev_consume_skb_any(skb);

        skb_list_walk_safe(segments, skb, next) {
                skb_mark_not_on_list(skb);
                efx_enqueue_skb(tx_queue, skb);
        }

        return 0;
}

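Since efx_tx_tso_fallback() is now exported through tx_common.h, any enqueue path can reuse the software-segmentation fallback. A hedged sketch of such a caller; treating every non-zero return from the hardware-TSO attempt as "fall back" is a simplification, and the function itself is hypothetical:

/* Hypothetical GSO enqueue: try hardware TSO first (efx_enqueue_skb_tso()
 * is declared in nic_common.h in this series), then fall back to software
 * segmentation on failure.
 */
static int example_enqueue_gso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb, bool *data_mapped)
{
        int rc = efx_enqueue_skb_tso(tx_queue, skb, data_mapped);

        if (rc)
                rc = efx_tx_tso_fallback(tx_queue, skb);
        return rc;
}
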
@@ -34,9 +34,10 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,

struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
                                       dma_addr_t dma_addr, size_t len);
int efx_tx_tso_header_length(struct sk_buff *skb);
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                    unsigned int segment_count);

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);

int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
#endif