Merge branch 'sfc-vf-representors-for-ef100-rx-side'
Edward Cree says:
====================
sfc: VF representors for EF100 - RX side

This series adds the receive path for EF100 VF representors, plus other
minor features such as statistics.
====================

Link: https://lore.kernel.org/r/cover.1659034549.git.ecree.xilinx@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit ed3849e429
@@ -8,7 +8,8 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o mae.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o

obj-$(CONFIG_SFC) += sfc.o
@@ -2538,23 +2538,33 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
if (rc)
return rc;
down_write(&efx->filter_sem);
rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);

if (rc)
return rc;
goto out_unlock;

list_for_each_entry(vlan, &nic_data->vlan_list, list) {
rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
if (rc)
goto fail_add_vlan;
}
return 0;
goto out_unlock;

fail_add_vlan:
efx_mcdi_filter_table_remove(efx);
out_unlock:
up_write(&efx->filter_sem);
return rc;
}

static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
down_write(&efx->filter_sem);
efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem);
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)

@@ -3211,9 +3221,7 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
down_write(&efx->filter_sem);
efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem);
efx_ef10_filter_table_remove(efx);

rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
if (rc)

@@ -3243,9 +3251,7 @@ restore_vadaptor:
if (rc2)
goto reset_nic;
restore_filters:
down_write(&efx->filter_sem);
rc2 = efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
if (rc2)
goto reset_nic;

@@ -3275,8 +3281,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_net_stop(efx->net_dev);

mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
efx_mcdi_filter_table_remove(efx);
efx_ef10_filter_table_remove(efx);

ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
efx->net_dev->dev_addr);

@@ -3286,7 +3291,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
sizeof(inbuf), NULL, 0, NULL);

efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);

if (was_enabled)

@@ -4092,7 +4096,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.ev_test_generate = efx_ef10_ev_test_generate,
.filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
.filter_table_remove = efx_mcdi_filter_table_remove,
.filter_table_remove = efx_ef10_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
.filter_insert = efx_mcdi_filter_insert,
.filter_remove_safe = efx_mcdi_filter_remove_safe,
@@ -431,6 +431,9 @@ static void ef100_pci_remove(struct pci_dev *pci_dev)

probe_data = container_of(efx, struct efx_probe_data, efx);
ef100_remove_netdev(probe_data);
#ifdef CONFIG_SFC_SRIOV
efx_fini_struct_tc(efx);
#endif

ef100_remove(efx);
efx_fini_io(efx);
@@ -329,6 +329,10 @@ void ef100_remove_netdev(struct efx_probe_data *probe_data)

ef100_unregister_netdev(efx);

#ifdef CONFIG_SFC_SRIOV
efx_fini_tc(efx);
#endif

down_write(&efx->filter_sem);
efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem);
@@ -24,6 +24,8 @@
#include "ef100_tx.h"
#include "ef100_sriov.h"
#include "ef100_netdev.h"
#include "tc.h"
#include "mae.h"
#include "rx_common.h"

#define EF100_MAX_VIS 4096

@@ -374,26 +376,46 @@ static int ef100_filter_table_up(struct efx_nic *efx)
{
int rc;

down_write(&efx->filter_sem);
rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
if (rc) {
efx_mcdi_filter_table_down(efx);
return rc;
}
if (rc)
goto fail_unspec;

rc = efx_mcdi_filter_add_vlan(efx, 0);
if (rc) {
efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
efx_mcdi_filter_table_down(efx);
}
if (rc)
goto fail_vlan0;
/* Drop the lock: we've finished altering table existence, and
* filter insertion will need to take the lock for read.
*/
up_write(&efx->filter_sem);
#ifdef CONFIG_SFC_SRIOV
rc = efx_tc_insert_rep_filters(efx);
/* Rep filter failure is nonfatal */
if (rc)
netif_warn(efx, drv, efx->net_dev,
"Failed to insert representor filters, rc %d\n",
rc);
#endif
return 0;

fail_vlan0:
efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
fail_unspec:
efx_mcdi_filter_table_down(efx);
up_write(&efx->filter_sem);
return rc;
}

static void ef100_filter_table_down(struct efx_nic *efx)
{
#ifdef CONFIG_SFC_SRIOV
efx_tc_remove_rep_filters(efx);
#endif
down_write(&efx->filter_sem);
efx_mcdi_filter_del_vlan(efx, 0);
efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
efx_mcdi_filter_table_down(efx);
up_write(&efx->filter_sem);
}

/* Other

@@ -704,6 +726,31 @@ static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
return 10 * EFX_RECYCLE_RING_SIZE_10G;
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef100_get_base_mport(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
u32 selector, id;
int rc;

/* Construct mport selector for "physical network port" */
efx_mae_mport_wire(efx, &selector);
/* Look up actual mport ID */
rc = efx_mae_lookup_mport(efx, selector, &id);
if (rc)
return rc;
/* The ID should always fit in 16 bits, because that's how wide the
* corresponding fields in the RX prefix & TX override descriptor are
*/
if (id >> 16)
netif_warn(efx, probe, efx->net_dev, "Bad base m-port id %#x\n",
id);
nic_data->base_mport = id;
nic_data->have_mport = true;
return 0;
}
#endif

static int compare_versions(const char *a, const char *b)
{
int a_major, a_minor, a_point, a_patch;

@@ -1064,6 +1111,34 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
eth_hw_addr_set(net_dev, net_dev->perm_addr);
memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);

if (!nic_data->grp_mae)
return 0;

#ifdef CONFIG_SFC_SRIOV
rc = efx_init_struct_tc(efx);
if (rc)
return rc;

rc = efx_ef100_get_base_mport(efx);
if (rc) {
netif_warn(efx, probe, net_dev,
"Failed to probe base mport rc %d; representors will not function\n",
rc);
}

rc = efx_init_tc(efx);
if (rc) {
/* Either we don't have an MAE at all (i.e. legacy v-switching),
* or we do but we failed to probe it. In the latter case, we
* may not have set up default rules, in which case we won't be
* able to pass any traffic. However, we don't fail the probe,
* because the user might need to use the netdevice to apply
* configuration changes to fix whatever's wrong with the MAE.
*/
netif_warn(efx, probe, net_dev, "Failed to probe MAE rc %d\n",
rc);
}
#endif
return 0;

fail:
@@ -72,6 +72,8 @@ struct ef100_nic_data {
u8 port_id[ETH_ALEN];
DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS);
u64 stats[EF100_STAT_COUNT];
u32 base_mport;
bool have_mport; /* base_mport was populated successfully */
bool grp_mae; /* MAE Privilege */
u16 tso_max_hdr_len;
u16 tso_max_payload_num_segs;
@@ -13,15 +13,24 @@
#include "ef100_netdev.h"
#include "ef100_nic.h"
#include "mae.h"
#include "rx_common.h"

#define EFX_EF100_REP_DRIVER "efx_ef100_rep"

#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE 64

static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);

static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
unsigned int i)
{
efv->parent = efx;
efv->idx = i;
INIT_LIST_HEAD(&efv->list);
efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
INIT_LIST_HEAD(&efv->dflt.acts.list);
INIT_LIST_HEAD(&efv->rx_list);
spin_lock_init(&efv->rx_lock);
efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |

@@ -29,6 +38,25 @@ static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
return 0;
}

static int efx_ef100_rep_open(struct net_device *net_dev)
{
struct efx_rep *efv = netdev_priv(net_dev);

netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
NAPI_POLL_WEIGHT);
napi_enable(&efv->napi);
return 0;
}

static int efx_ef100_rep_close(struct net_device *net_dev)
{
struct efx_rep *efv = netdev_priv(net_dev);

napi_disable(&efv->napi);
netif_napi_del(&efv->napi);
return 0;
}

static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
struct net_device *dev)
{

@@ -79,10 +107,26 @@ static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
return 0;
}

static void efx_ef100_rep_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct efx_rep *efv = netdev_priv(dev);

stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
}

static const struct net_device_ops efx_ef100_rep_netdev_ops = {
.ndo_open = efx_ef100_rep_open,
.ndo_stop = efx_ef100_rep_close,
.ndo_start_xmit = efx_ef100_rep_xmit,
.ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id,
.ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name,
.ndo_get_stats64 = efx_ef100_rep_get_stats64,
};

static void efx_ef100_rep_get_drvinfo(struct net_device *dev,

@@ -106,10 +150,37 @@ static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
efv->msg_enable = msg_enable;
}

static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kring,
struct netlink_ext_ack *ext_ack)
{
struct efx_rep *efv = netdev_priv(net_dev);

ring->rx_max_pending = U32_MAX;
ring->rx_pending = efv->rx_pring_size;
}

static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kring,
struct netlink_ext_ack *ext_ack)
{
struct efx_rep *efv = netdev_priv(net_dev);

if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
return -EINVAL;

efv->rx_pring_size = ring->rx_pending;
return 0;
}

static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
.get_drvinfo = efx_ef100_rep_get_drvinfo,
.get_msglevel = efx_ef100_rep_ethtool_get_msglevel,
.set_msglevel = efx_ef100_rep_ethtool_set_msglevel,
.get_ringparam = efx_ef100_rep_ethtool_get_ringparam,
.set_ringparam = efx_ef100_rep_ethtool_set_ringparam,
};

static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,

@@ -159,6 +230,7 @@ static int efx_ef100_configure_rep(struct efx_rep *efv)
u32 selector;
int rc;

efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
/* Construct mport selector for corresponding VF */
efx_mae_mport_vf(efx, efv->idx, &selector);
/* Look up actual mport ID */

@@ -169,7 +241,14 @@ static int efx_ef100_configure_rep(struct efx_rep *efv)
/* mport label should fit in 16 bits */
WARN_ON(efv->mport >> 16);

return 0;
return efx_tc_configure_default_rule_rep(efv);
}

static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
{
struct efx_nic *efx = efv->parent;

efx_tc_deconfigure_default_rule(efx, &efv->dflt);
}

static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)

@@ -181,6 +260,7 @@ static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
list_del(&efv->list);
spin_unlock_bh(&efx->vf_reps_lock);
rtnl_unlock();
synchronize_rcu();
free_netdev(efv->net_dev);
}

@@ -202,19 +282,21 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
pci_err(efx->pci_dev,
"Failed to configure representor for VF %d, rc %d\n",
i, rc);
goto fail;
goto fail1;
}
rc = register_netdev(efv->net_dev);
if (rc) {
pci_err(efx->pci_dev,
"Failed to register representor for VF %d, rc %d\n",
i, rc);
goto fail;
goto fail2;
}
pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
efv->net_dev->name);
return 0;
fail:
fail2:
efx_ef100_deconfigure_rep(efv);
fail1:
efx_ef100_rep_destroy_netdev(efv);
return rc;
}

@@ -228,6 +310,7 @@ void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
return;
netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
unregister_netdev(rep_dev);
efx_ef100_deconfigure_rep(efv);
efx_ef100_rep_destroy_netdev(efv);
}

@@ -242,3 +325,111 @@ void efx_ef100_fini_vfreps(struct efx_nic *efx)
list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
efx_ef100_vfrep_destroy(efx, efv);
}

static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
{
struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
unsigned int read_index;
struct list_head head;
struct sk_buff *skb;
bool need_resched;
int spent = 0;

INIT_LIST_HEAD(&head);
/* Grab up to 'weight' pending SKBs */
spin_lock_bh(&efv->rx_lock);
read_index = efv->write_index;
while (spent < weight && !list_empty(&efv->rx_list)) {
skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
list_del(&skb->list);
list_add_tail(&skb->list, &head);
spent++;
}
spin_unlock_bh(&efv->rx_lock);
/* Receive them */
netif_receive_skb_list(&head);
if (spent < weight)
if (napi_complete_done(napi, spent)) {
spin_lock_bh(&efv->rx_lock);
efv->read_index = read_index;
/* If write_index advanced while we were doing the
* RX, then storing our read_index won't re-prime the
* fake-interrupt. In that case, we need to schedule
* NAPI again to consume the additional packet(s).
*/
need_resched = efv->write_index != read_index;
spin_unlock_bh(&efv->rx_lock);
if (need_resched)
napi_schedule(&efv->napi);
}
return spent;
}

void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
{
u8 *eh = efx_rx_buf_va(rx_buf);
struct sk_buff *skb;
bool primed;

/* Don't allow too many queued SKBs to build up, as they consume
* GFP_ATOMIC memory. If we overrun, just start dropping.
*/
if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
atomic64_inc(&efv->stats.rx_dropped);
if (net_ratelimit())
netif_dbg(efv->parent, rx_err, efv->net_dev,
"nodesc-dropped packet of length %u\n",
rx_buf->len);
return;
}

skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
if (!skb) {
atomic64_inc(&efv->stats.rx_dropped);
if (net_ratelimit())
netif_dbg(efv->parent, rx_err, efv->net_dev,
"noskb-dropped packet of length %u\n",
rx_buf->len);
return;
}
memcpy(skb->data, eh, rx_buf->len);
__skb_put(skb, rx_buf->len);

skb_record_rx_queue(skb, 0); /* rep is single-queue */

/* Move past the ethernet header */
skb->protocol = eth_type_trans(skb, efv->net_dev);

skb_checksum_none_assert(skb);

atomic64_inc(&efv->stats.rx_packets);
atomic64_add(rx_buf->len, &efv->stats.rx_bytes);

/* Add it to the rx list */
spin_lock_bh(&efv->rx_lock);
primed = efv->read_index == efv->write_index;
list_add_tail(&skb->list, &efv->rx_list);
efv->write_index++;
spin_unlock_bh(&efv->rx_lock);
/* Trigger rx work */
if (primed)
napi_schedule(&efv->napi);
}

struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
{
struct efx_rep *efv, *out = NULL;

/* spinlock guards against list mutation while we're walking it;
* but caller must also hold rcu_read_lock() to ensure the netdev
* isn't freed after we drop the spinlock.
*/
spin_lock_bh(&efx->vf_reps_lock);
list_for_each_entry(efv, &efx->vf_reps, list)
if (efv->mport == mport) {
out = efv;
break;
}
spin_unlock_bh(&efx->vf_reps_lock);
return out;
}
@@ -14,6 +14,7 @@
#define EF100_REP_H

#include "net_driver.h"
#include "tc.h"

struct efx_rep_sw_stats {
atomic64_t rx_packets, tx_packets;

@@ -29,7 +30,14 @@ struct efx_rep_sw_stats {
* @msg_enable: log message enable flags
* @mport: m-port ID of corresponding VF
* @idx: VF index
* @write_index: number of packets enqueued to @rx_list
* @read_index: number of packets consumed from @rx_list
* @rx_pring_size: max length of RX list
* @dflt: default-rule for MAE switching
* @list: entry on efx->vf_reps
* @rx_list: list of SKBs queued for receive in NAPI poll
* @rx_lock: protects @rx_list
* @napi: NAPI control structure
* @stats: software traffic counters for netdev stats
*/
struct efx_rep {

@@ -38,7 +46,13 @@ struct efx_rep {
u32 msg_enable;
u32 mport;
unsigned int idx;
unsigned int write_index, read_index;
unsigned int rx_pring_size;
struct efx_tc_flow_rule dflt;
struct list_head list;
struct list_head rx_list;
spinlock_t rx_lock;
struct napi_struct napi;
struct efx_rep_sw_stats stats;
};

@@ -46,4 +60,10 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i);
void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv);
void efx_ef100_fini_vfreps(struct efx_nic *efx);

void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf);
/* Returns the representor corresponding to a VF m-port, or NULL
* @mport is an m-port label, *not* an m-port ID!
* Caller must hold rcu_read_lock().
*/
struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport);
#endif /* EF100_REP_H */
@@ -55,10 +55,14 @@ static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)

void __ef100_rx_packet(struct efx_channel *channel)
{
struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue,
channel->rx_pkt_index);
struct efx_nic *efx = channel->efx;
struct ef100_nic_data *nic_data;
u8 *eh = efx_rx_buf_va(rx_buf);
__wsum csum = 0;
u16 ing_port;
u32 *prefix;

prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);

@@ -76,6 +80,37 @@ void __ef100_rx_packet(struct efx_channel *channel)
goto out;
}

ing_port = le16_to_cpu((__force __le16) PREFIX_FIELD(prefix, INGRESS_MPORT));

nic_data = efx->nic_data;

if (nic_data->have_mport && ing_port != nic_data->base_mport) {
#ifdef CONFIG_SFC_SRIOV
struct efx_rep *efv;

rcu_read_lock();
efv = efx_ef100_find_rep_by_mport(efx, ing_port);
if (efv) {
if (efv->net_dev->flags & IFF_UP)
efx_ef100_rep_rx_packet(efv, rx_buf);
rcu_read_unlock();
/* Representor Rx doesn't care about PF Rx buffer
* ownership, it just makes a copy. So, we are done
* with the Rx buffer from PF point of view and should
* free it.
*/
goto free_rx_buffer;
}
rcu_read_unlock();
#endif
if (net_ratelimit())
netif_warn(efx, drv, efx->net_dev,
"Unrecognised ing_port %04x (base %04x), dropping\n",
ing_port, nic_data->base_mport);
channel->n_rx_mport_bad++;
goto free_rx_buffer;
}

if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) {
++channel->n_rx_ip_hdr_chksum_err;

@@ -87,17 +122,16 @@ void __ef100_rx_packet(struct efx_channel *channel)
}

if (channel->type->receive_skb) {
struct efx_rx_queue *rx_queue =
efx_channel_get_rx_queue(channel);

/* no support for special channels yet, so just discard */
WARN_ON_ONCE(1);
efx_free_rx_buffers(rx_queue, rx_buf, 1);
goto out;
goto free_rx_buffer;
}

efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
goto out;

free_rx_buffer:
efx_free_rx_buffers(rx_queue, rx_buf, 1);
out:
channel->rx_pkt_n_frags = 0;
}
@@ -501,14 +501,11 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
efx_device_detach_sync(vf->efx);
efx_net_stop(vf->efx->net_dev);

down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx);

rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
if (rc) {
up_write(&vf->efx->filter_sem);
if (rc)
return rc;
}
}

rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);

@@ -539,12 +536,9 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
if (vf->efx) {
/* VF cannot use the vport_id that the PF created */
rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
if (rc) {
up_write(&vf->efx->filter_sem);
if (rc)
return rc;
}
vf->efx->type->filter_table_probe(vf->efx);
up_write(&vf->efx->filter_sem);
efx_net_open(vf->efx->net_dev);
efx_device_attach_if_not_resetting(vf->efx);
}

@@ -580,7 +574,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
efx_net_stop(vf->efx->net_dev);

mutex_lock(&vf->efx->mac_lock);
down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx);

rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);

@@ -654,7 +647,6 @@ restore_filters:
if (rc2)
goto reset_nic_up_write;

up_write(&vf->efx->filter_sem);
mutex_unlock(&vf->efx->mac_lock);

rc2 = efx_net_open(vf->efx->net_dev);

@@ -666,10 +658,8 @@ restore_filters:
return rc;

reset_nic_up_write:
if (vf->efx) {
up_write(&vf->efx->filter_sem);
if (vf->efx)
mutex_unlock(&vf->efx->mac_lock);
}
reset_nic:
if (vf->efx) {
netif_err(efx, drv, efx->net_dev,
@@ -91,6 +91,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mport_bad),
#ifdef CONFIG_RFS_ACCEL
EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
@@ -88,6 +88,7 @@ enum efx_filter_priority {
* the automatic filter in its place.
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
* @EFX_FILTER_FLAG_VPORT_ID: Virtual port ID for adapter switching.
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,

@@ -95,6 +96,7 @@ enum efx_filter_flags {
EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
EFX_FILTER_FLAG_VPORT_ID = 0x20,
};

/** enum efx_encap_type - types of encapsulation

@@ -127,6 +129,9 @@ enum efx_encap_type {
* MCFW context_id.
* @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
* an RX drop filter
* @vport_id: Virtual port ID associated with RX queue, for adapter switching,
* if %EFX_FILTER_FLAG_VPORT_ID is set. This is an MCFW vport_id, or on
* EF100 an mport selector.
* @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
* @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
* @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or

@@ -156,6 +161,7 @@ struct efx_filter_spec {
u32 priority:2;
u32 flags:6;
u32 dmaq_id:12;
u32 vport_id;
u32 rss_context;
__be16 outer_vid __aligned(4); /* allow jhash2() of match values */
__be16 inner_vid;

@@ -292,6 +298,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
return 0;
}

/**
* efx_filter_set_vport_id - override virtual port id relating to filter
* @spec: Specification to initialise
* @vport_id: firmware ID of the virtual port
*/
static inline void efx_filter_set_vport_id(struct efx_filter_spec *spec,
u32 vport_id)
{
spec->flags |= EFX_FILTER_FLAG_VPORT_ID;
spec->vport_id = vport_id;
}

static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
enum efx_encap_type encap_type)
{
@@ -11,7 +11,65 @@

#include "mae.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mcdi_pcol_mae.h"

int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN);
size_t outlen;
int rc;

if (WARN_ON_ONCE(!id))
return -EINVAL;
if (WARN_ON_ONCE(!label))
return -EINVAL;

MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_TYPE,
MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS);
MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT,
MAE_MPORT_SELECTOR_ASSIGNED);
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_ALLOC, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < sizeof(outbuf))
return -EIO;
*id = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID);
*label = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_LABEL);
return 0;
}

int efx_mae_free_mport(struct efx_nic *efx, u32 id)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_FREE_IN_LEN);

BUILD_BUG_ON(MC_CMD_MAE_MPORT_FREE_OUT_LEN);
MCDI_SET_DWORD(inbuf, MAE_MPORT_FREE_IN_MPORT_ID, id);
return efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_FREE, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}

void efx_mae_mport_wire(struct efx_nic *efx, u32 *out)
{
efx_dword_t mport;

EFX_POPULATE_DWORD_2(mport,
MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_PPORT,
MAE_MPORT_SELECTOR_PPORT_ID, efx->port_num);
*out = EFX_DWORD_VAL(mport);
}

void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out)
{
efx_dword_t mport;

EFX_POPULATE_DWORD_3(mport,
MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
MAE_MPORT_SELECTOR_FUNC_VF_ID, MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL);
*out = EFX_DWORD_VAL(mport);
}

void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
{

@@ -24,6 +82,17 @@ void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
*out = EFX_DWORD_VAL(mport);
}

/* Constructs an mport selector from an mport ID, because they're not the same */
void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out)
{
efx_dword_t mport;

EFX_POPULATE_DWORD_2(mport,
MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_MPORT_ID,
MAE_MPORT_SELECTOR_MPORT_ID, mport_id);
*out = EFX_DWORD_VAL(mport);
}

/* id is really only 24 bits wide */
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
{
@@ -42,3 +111,236 @@ int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
|
|||
*id = MCDI_DWORD(outbuf, MAE_MPORT_LOOKUP_OUT_MPORT_ID);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool efx_mae_asl_id(u32 id)
|
||||
{
|
||||
return !!(id & BIT(31));
|
||||
}
|
||||
|
||||
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
|
||||
{
|
||||
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
|
||||
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN);
|
||||
size_t outlen;
|
||||
int rc;
|
||||
|
||||
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
|
||||
MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
|
||||
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
|
||||
MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
|
||||
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
|
||||
MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
|
||||
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID,
|
||||
MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL);
|
||||
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID,
|
||||
MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL);
|
||||
if (act->deliver)
|
||||
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DELIVER,
|
||||
act->dest_mport);
|
||||
BUILD_BUG_ON(MAE_MPORT_SELECTOR_NULL);
|
||||
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf, sizeof(inbuf),
|
||||
outbuf, sizeof(outbuf), &outlen);
|
||||
if (rc)
|
||||
return rc;
|
||||
if (outlen < sizeof(outbuf))
|
||||
return -EIO;
|
||||
act->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID);
|
||||
/* We rely on the high bit of AS IDs always being clear.
|
||||
* The firmware API guarantees this, but let's check it ourselves.
|
||||
*/
|
||||
if (WARN_ON_ONCE(efx_mae_asl_id(act->fw_id))) {
|
||||
efx_mae_free_action_set(efx, act->fw_id);
|
||||
return -EIO;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id)
|
||||
{
|
||||
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(1));
|
||||
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(1));
|
||||
size_t outlen;
|
||||
int rc;
|
||||
|
||||
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_FREE_IN_AS_ID, fw_id);
|
||||
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_FREE, inbuf, sizeof(inbuf),
|
||||
outbuf, sizeof(outbuf), &outlen);
|
||||
if (rc)
|
||||
return rc;
|
||||
if (outlen < sizeof(outbuf))
|
||||
return -EIO;
|
||||
/* FW freed a different ID than we asked for, should never happen.
|
||||
* Warn because it means we've now got a different idea to the FW of
|
||||
* what action-sets exist, which could cause mayhem later.
|
||||
*/
|
||||
if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_FREE_OUT_FREED_AS_ID) != fw_id))
|
||||
return -EIO;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int efx_mae_alloc_action_set_list(struct efx_nic *efx,
|
||||
struct efx_tc_action_set_list *acts)
|
||||
{
|
||||
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
|
||||
struct efx_tc_action_set *act;
|
||||
size_t inlen, outlen, i = 0;
|
||||
efx_dword_t *inbuf;
|
||||
int rc;
|
||||
|
||||
list_for_each_entry(act, &acts->list, list)
|
||||
i++;
|
||||
if (i == 0)
|
||||
return -EINVAL;
|
||||
if (i == 1) {
|
||||
/* Don't wrap an ASL around a single AS, just use the AS_ID
|
||||
* directly. ASLs are a more limited resource.
|
||||
*/
|
||||
act = list_first_entry(&acts->list, struct efx_tc_action_set, list);
|
||||
acts->fw_id = act->fw_id;
|
||||
return 0;
|
||||
}
|
||||
if (i > MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2)
|
||||
return -EOPNOTSUPP; /* Too many actions */
|
||||
inlen = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(i);
|
||||
inbuf = kzalloc(inlen, GFP_KERNEL);
|
||||
if (!inbuf)
|
||||
return -ENOMEM;
|
||||
i = 0;
|
||||
list_for_each_entry(act, &acts->list, list) {
|
||||
MCDI_SET_ARRAY_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS,
|
||||
i, act->fw_id);
|
||||
i++;
|
||||
}
|
||||
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, i);
|
||||
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_ALLOC, inbuf, inlen,
|
||||
outbuf, sizeof(outbuf), &outlen);
|
||||
if (rc)
|
||||
goto out_free;
|
||||
if (outlen < sizeof(outbuf)) {
|
||||
rc = -EIO;
|
||||
goto out_free;
|
||||
}
|
||||
acts->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID);
|
||||
/* We rely on the high bit of ASL IDs always being set.
|
||||
* The firmware API guarantees this, but let's check it ourselves.
|
||||
*/
|
||||
if (WARN_ON_ONCE(!efx_mae_asl_id(acts->fw_id))) {
|
||||
efx_mae_free_action_set_list(efx, acts);
|
||||
rc = -EIO;
|
||||
}
|
||||
out_free:
|
||||
kfree(inbuf);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int efx_mae_free_action_set_list(struct efx_nic *efx,
|
||||
struct efx_tc_action_set_list *acts)
|
||||
{
|
||||
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1));
|
||||
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1));
|
||||
size_t outlen;
|
||||
int rc;
|
||||
|
||||
/* If this is just an AS_ID with no ASL wrapper, then there is
|
||||
* nothing for us to free. (The AS will be freed later.)
|
||||
*/
|
||||
if (efx_mae_asl_id(acts->fw_id)) {
|
||||
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_FREE_IN_ASL_ID,
|
||||
acts->fw_id);
|
||||
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_FREE, inbuf,
|
||||
sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
|
||||
if (rc)
|
||||
return rc;
|
||||
if (outlen < sizeof(outbuf))
|
||||
return -EIO;
|
||||
/* FW freed a different ID than we asked for, should never happen.
|
||||
* Warn because it means we've now got a different idea to the FW of
|
||||
* what action-set-lists exist, which could cause mayhem later.
|
||||
*/
|
||||
if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) != acts->fw_id))
|
||||
return -EIO;
|
||||
}
|
||||
/* We're probably about to free @acts, but let's just make sure its
|
||||
* fw_id is blatted so that it won't look valid if it leaks out.
|
||||
*/
|
||||
acts->fw_id = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
|
||||
const struct efx_tc_match *match)
|
||||
{
|
||||
if (match->mask.ingress_port) {
|
||||
if (~match->mask.ingress_port)
|
||||
return -EOPNOTSUPP;
|
||||
MCDI_STRUCT_SET_DWORD(match_crit,
|
||||
MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR,
|
||||
match->value.ingress_port);
|
||||
}
|
||||
MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
|
||||
match->mask.ingress_port);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
|
||||
u32 prio, u32 acts_id, u32 *id)
|
||||
{
|
||||
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN));
|
||||
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN);
|
||||
MCDI_DECLARE_STRUCT_PTR(match_crit);
|
||||
MCDI_DECLARE_STRUCT_PTR(response);
|
||||
size_t outlen;
|
||||
int rc;
|
||||
|
||||
if (!id)
|
||||
return -EINVAL;
|
||||
|
||||
match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA);
|
||||
response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE);
|
||||
if (efx_mae_asl_id(acts_id)) {
|
||||
MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, acts_id);
|
||||
MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID,
|
||||
MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL);
|
||||
} else {
|
||||
/* We only had one AS, so we didn't wrap it in an ASL */
|
||||
MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID,
|
||||
MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
|
||||
MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, acts_id);
|
||||
}
|
||||
MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio);
|
||||
rc = efx_mae_populate_match_criteria(match_crit, match);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf),
|
||||
outbuf, sizeof(outbuf), &outlen);
|
||||
if (rc)
|
||||
return rc;
|
||||
if (outlen < sizeof(outbuf))
|
||||
return -EIO;
|
||||
*id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int efx_mae_delete_rule(struct efx_nic *efx, u32 id)
|
||||
{
|
||||
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(1));
|
||||
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(1));
|
||||
size_t outlen;
|
||||
int rc;
|
||||
|
||||
MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_DELETE_IN_AR_ID, id);
|
||||
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_DELETE, inbuf, sizeof(inbuf),
|
||||
outbuf, sizeof(outbuf), &outlen);
|
||||
if (rc)
|
||||
return rc;
|
||||
if (outlen < sizeof(outbuf))
|
||||
return -EIO;
|
||||
/* FW freed a different ID than we asked for, should also never happen.
|
||||
* Warn because it means we've now got a different idea to the FW of
|
||||
* what rules exist, which could cause mayhem later.
|
||||
*/
|
||||
if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID) != id))
|
||||
return -EIO;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -14,9 +14,29 @@
/* MCDI interface for the ef100 Match-Action Engine */

#include "net_driver.h"
#include "tc.h"
#include "mcdi_pcol.h" /* needed for various MC_CMD_MAE_*_NULL defines */

int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label);
int efx_mae_free_mport(struct efx_nic *efx, u32 id);

void efx_mae_mport_wire(struct efx_nic *efx, u32 *out);
void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out);
void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);

int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);

int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);

int efx_mae_alloc_action_set_list(struct efx_nic *efx,
struct efx_tc_action_set_list *acts);
int efx_mae_free_action_set_list(struct efx_nic *efx,
struct efx_tc_action_set_list *acts);

int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
u32 prio, u32 acts_id, u32 *id);
int efx_mae_delete_rule(struct efx_nic *efx, u32 id);

#endif /* EF100_MAE_H */
@@ -205,6 +205,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
#define _MCDI_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
#define _MCDI_STRUCT_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(_field ## _OFST, 4) >> 2))

#define MCDI_BYTE(_buf, _field) \
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \

@@ -214,6 +216,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
#define MCDI_SET_DWORD(_buf, _field, _value) \
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_STRUCT_SET_DWORD(_buf, _field, _value) \
EFX_POPULATE_DWORD_1(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field) \
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
@@ -221,7 +221,10 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
}

MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
if (flags & EFX_FILTER_FLAG_VPORT_ID)
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, spec->vport_id);
else
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :

@@ -488,6 +491,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
saved_spec->flags |= spec->flags;
saved_spec->rss_context = spec->rss_context;
saved_spec->dmaq_id = spec->dmaq_id;
saved_spec->vport_id = spec->vport_id;
}
} else if (!replacing) {
kfree(saved_spec);
@@ -89,6 +89,7 @@ struct efx_mcdi_filter_table {
*/
bool mc_chaining;
bool vlan_filter;
/* Entries on the vlan_list are added/removed under filter_sem */
struct list_head vlan_list;
};
@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2019-2022 Xilinx, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/

#ifndef MCDI_PCOL_MAE_H
#define MCDI_PCOL_MAE_H
/* MCDI definitions for Match-Action Engine functionality, that are
* missing from the main mcdi_pcol.h
*/

/* MC_CMD_MAE_COUNTER_LIST_ALLOC is not (yet) a released API, but the
* following value is needed as an argument to MC_CMD_MAE_ACTION_SET_ALLOC.
*/
/* enum: A counter ID that is guaranteed never to represent a real counter */
#define MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL 0xffffffff

#endif /* MCDI_PCOL_MAE_H */
@@ -478,6 +478,8 @@ enum efx_sync_events_state {
* @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
* @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
* @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
* @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was
* not recognised
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered

@@ -540,6 +542,7 @@ struct efx_channel {
unsigned int n_rx_xdp_bad_drops;
unsigned int n_rx_xdp_tx;
unsigned int n_rx_xdp_redirect;
unsigned int n_rx_mport_bad;

unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;

@@ -975,6 +978,7 @@ enum efx_xdp_tx_queues_mode {
* @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
* xdp_rxq_info structures?
* @netdev_notifier: Netdevice notifier.
* @tc: state for TC offload (EF100).
* @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window.
* @monitor_work: Hardware monitor workitem

@@ -1158,6 +1162,7 @@ struct efx_nic {
bool xdp_rxq_info_failed;

struct notifier_block netdev_notifier;
struct efx_tc_state *tc;

unsigned int mem_bar;
u32 reg_base;
@@ -793,7 +793,6 @@ int efx_probe_filters(struct efx_nic *efx)
int rc;

mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
if (rc)
goto out_unlock;

@@ -830,7 +829,6 @@ int efx_probe_filters(struct efx_nic *efx)
}
#endif
out_unlock:
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
return rc;
}

@@ -846,9 +844,7 @@ void efx_remove_filters(struct efx_nic *efx)
channel->rps_flow_id = NULL;
}
#endif
down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
up_write(&efx->filter_sem);
}

#ifdef CONFIG_RFS_ACCEL
@@ -0,0 +1,252 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2019 Solarflare Communications Inc.
|
||||
* Copyright 2020-2022 Xilinx Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#include "tc.h"
|
||||
#include "mae.h"
|
||||
#include "ef100_rep.h"
|
||||
#include "efx.h"
|
||||
|
||||
static void efx_tc_free_action_set(struct efx_nic *efx,
|
||||
struct efx_tc_action_set *act, bool in_hw)
|
||||
{
|
||||
/* Failure paths calling this on the 'running action' set in_hw=false,
|
||||
* because if the alloc had succeeded we'd've put it in acts.list and
|
||||
* not still have it in act.
|
||||
*/
|
||||
if (in_hw) {
|
||||
efx_mae_free_action_set(efx, act->fw_id);
|
||||
/* in_hw is true iff we are on an acts.list; make sure to
|
||||
* remove ourselves from that list before we are freed.
|
||||
*/
|
||||
list_del(&act->list);
|
||||
}
|
||||
kfree(act);
|
||||
}
|
||||
|
||||
static void efx_tc_free_action_set_list(struct efx_nic *efx,
|
||||
struct efx_tc_action_set_list *acts,
|
||||
bool in_hw)
|
||||
{
|
||||
struct efx_tc_action_set *act, *next;
|
||||
|
||||
/* Failure paths set in_hw=false, because usually the acts didn't get
|
||||
* to efx_mae_alloc_action_set_list(); if they did, the failure tree
|
||||
* has a separate efx_mae_free_action_set_list() before calling us.
|
||||
*/
|
||||
if (in_hw)
|
||||
efx_mae_free_action_set_list(efx, acts);
|
||||
/* Any act that's on the list will be in_hw even if the list isn't */
|
||||
list_for_each_entry_safe(act, next, &acts->list, list)
|
||||
efx_tc_free_action_set(efx, act, true);
|
||||
/* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */
|
||||
}
|
||||
|
||||
static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
|
||||
{
|
||||
efx_mae_delete_rule(efx, rule->fw_id);
|
||||
|
||||
/* Release entries in subsidiary tables */
|
||||
efx_tc_free_action_set_list(efx, &rule->acts, true);
|
||||
rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
|
||||
}
|
||||
|
||||
static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
|
||||
u32 eg_port, struct efx_tc_flow_rule *rule)
|
||||
{
|
||||
struct efx_tc_action_set_list *acts = &rule->acts;
|
||||
struct efx_tc_match *match = &rule->match;
|
||||
struct efx_tc_action_set *act;
|
||||
int rc;
|
||||
|
||||
match->value.ingress_port = ing_port;
|
||||
match->mask.ingress_port = ~0;
|
||||
act = kzalloc(sizeof(*act), GFP_KERNEL);
|
||||
if (!act)
|
||||
return -ENOMEM;
|
||||
act->deliver = 1;
|
||||
act->dest_mport = eg_port;
|
||||
rc = efx_mae_alloc_action_set(efx, act);
|
||||
if (rc)
|
||||
goto fail1;
|
||||
EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
|
||||
list_add_tail(&act->list, &acts->list);
|
||||
rc = efx_mae_alloc_action_set_list(efx, acts);
|
||||
if (rc)
|
||||
goto fail2;
|
||||
rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
|
||||
acts->fw_id, &rule->fw_id);
|
||||
if (rc)
|
||||
goto fail3;
|
||||
return 0;
|
||||
fail3:
|
||||
efx_mae_free_action_set_list(efx, acts);
|
||||
fail2:
|
||||
list_del(&act->list);
|
||||
efx_mae_free_action_set(efx, act->fw_id);
|
||||
fail1:
|
||||
kfree(act);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
|
||||
{
|
||||
struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf;
|
||||
u32 ing_port, eg_port;
|
||||
|
||||
efx_mae_mport_uplink(efx, &ing_port);
|
||||
efx_mae_mport_wire(efx, &eg_port);
|
||||
return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
|
||||
}
|
||||
|
||||
static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
|
||||
{
|
||||
struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire;
|
||||
u32 ing_port, eg_port;
|
||||
|
||||
efx_mae_mport_wire(efx, &ing_port);
|
||||
efx_mae_mport_uplink(efx, &eg_port);
|
||||
return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
|
||||
}
|
||||
|
||||
int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
|
||||
{
|
||||
struct efx_tc_flow_rule *rule = &efv->dflt;
|
||||
struct efx_nic *efx = efv->parent;
|
||||
u32 ing_port, eg_port;
|
||||
|
||||
efx_mae_mport_mport(efx, efv->mport, &ing_port);
|
||||
efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
|
||||
return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
|
||||
}
|
||||
|
||||
void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
|
||||
struct efx_tc_flow_rule *rule)
|
||||
{
|
||||
if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
|
||||
efx_tc_delete_rule(efx, rule);
|
||||
rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
|
||||
}
|
||||
|
||||
static int efx_tc_configure_rep_mport(struct efx_nic *efx)
|
||||
{
|
||||
u32 rep_mport_label;
|
||||
int rc;
|
||||
|
||||
rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label);
|
||||
if (rc)
|
||||
return rc;
|
||||
pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
|
||||
efx->tc->reps_mport_id, rep_mport_label);
|
||||
/* Use mport *selector* as vport ID */
|
||||
efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
|
||||
&efx->tc->reps_mport_vport_id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
|
||||
{
|
||||
efx_mae_free_mport(efx, efx->tc->reps_mport_id);
|
||||
efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
|
||||
}
|
||||
|
||||
int efx_tc_insert_rep_filters(struct efx_nic *efx)
|
||||
{
|
||||
struct efx_filter_spec promisc, allmulti;
|
||||
int rc;
|
||||
|
||||
if (efx->type->is_vf)
|
||||
return 0;
|
||||
if (!efx->tc)
|
||||
return 0;
|
||||
efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
|
||||
efx_filter_set_uc_def(&promisc);
|
||||
efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
|
||||
rc = efx_filter_insert_filter(efx, &promisc, false);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
efx->tc->reps_filter_uc = rc;
|
||||
efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
|
||||
efx_filter_set_mc_def(&allmulti);
|
||||
efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
|
||||
rc = efx_filter_insert_filter(efx, &allmulti, false);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
efx->tc->reps_filter_mc = rc;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void efx_tc_remove_rep_filters(struct efx_nic *efx)
|
||||
{
|
||||
if (efx->type->is_vf)
|
||||
return;
|
||||
if (!efx->tc)
|
||||
return;
|
||||
if (efx->tc->reps_filter_mc >= 0)
|
||||
efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc);
|
||||
efx->tc->reps_filter_mc = -1;
|
||||
if (efx->tc->reps_filter_uc >= 0)
|
||||
efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc);
|
||||
efx->tc->reps_filter_uc = -1;
|
||||
}
|
||||
|
||||
int efx_init_tc(struct efx_nic *efx)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = efx_tc_configure_default_rule_pf(efx);
|
||||
if (rc)
|
||||
return rc;
|
||||
rc = efx_tc_configure_default_rule_wire(efx);
|
||||
if (rc)
|
||||
return rc;
|
||||
return efx_tc_configure_rep_mport(efx);
|
||||
}
|
||||
|
||||
void efx_fini_tc(struct efx_nic *efx)
|
||||
{
|
||||
/* We can get called even if efx_init_struct_tc() failed */
|
||||
if (!efx->tc)
|
||||
return;
|
||||
efx_tc_deconfigure_rep_mport(efx);
|
||||
efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
|
||||
efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
|
||||
}
|
||||
|
||||
int efx_init_struct_tc(struct efx_nic *efx)
|
||||
{
|
||||
if (efx->type->is_vf)
|
||||
return 0;
|
||||
|
||||
efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
|
||||
if (!efx->tc)
|
||||
return -ENOMEM;
|
||||
|
||||
efx->tc->reps_filter_uc = -1;
|
||||
efx->tc->reps_filter_mc = -1;
|
||||
INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
|
||||
efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
|
||||
INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
|
||||
efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void efx_fini_struct_tc(struct efx_nic *efx)
|
||||
{
|
||||
if (!efx->tc)
|
||||
return;
|
||||
|
||||
EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
|
||||
MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
|
||||
EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
|
||||
MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
|
||||
kfree(efx->tc);
|
||||
efx->tc = NULL;
|
||||
}
|
|
@@ -0,0 +1,85 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2019 Solarflare Communications Inc.
|
||||
* Copyright 2020-2022 Xilinx Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_TC_H
|
||||
#define EFX_TC_H
|
||||
#include "net_driver.h"
|
||||
|
||||
struct efx_tc_action_set {
|
||||
u16 deliver:1;
|
||||
u32 dest_mport;
|
||||
u32 fw_id; /* index of this entry in firmware actions table */
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct efx_tc_match_fields {
|
||||
/* L1 */
|
||||
u32 ingress_port;
|
||||
};
|
||||
|
||||
struct efx_tc_match {
|
||||
struct efx_tc_match_fields value;
|
||||
struct efx_tc_match_fields mask;
|
||||
};
|
||||
|
||||
struct efx_tc_action_set_list {
|
||||
struct list_head list;
|
||||
u32 fw_id;
|
||||
};
|
||||
|
||||
struct efx_tc_flow_rule {
|
||||
struct efx_tc_match match;
|
||||
struct efx_tc_action_set_list acts;
|
||||
u32 fw_id;
|
||||
};
|
||||
|
||||
enum efx_tc_rule_prios {
|
||||
EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
|
||||
EFX_TC_PRIO__NUM
|
||||
};
|
||||
|
||||
/**
|
||||
* struct efx_tc_state - control plane data for TC offload
|
||||
*
|
||||
* @reps_mport_id: MAE port allocated for representor RX
|
||||
* @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
|
||||
* @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
|
||||
* @reps_mport_vport_id: vport_id for representor RX filters
|
||||
* @dflt: Match-action rules for default switching; at priority
|
||||
* %EFX_TC_PRIO_DFLT. Named by *ingress* port
|
||||
* @dflt.pf: rule for traffic ingressing from PF (egresses to wire)
|
||||
* @dflt.wire: rule for traffic ingressing from wire (egresses to PF)
|
||||
*/
|
||||
struct efx_tc_state {
|
||||
u32 reps_mport_id, reps_mport_vport_id;
|
||||
s32 reps_filter_uc, reps_filter_mc;
|
||||
struct {
|
||||
struct efx_tc_flow_rule pf;
|
||||
struct efx_tc_flow_rule wire;
|
||||
} dflt;
|
||||
};
|
||||
|
||||
struct efx_rep;
|
||||
|
||||
int efx_tc_configure_default_rule_rep(struct efx_rep *efv);
|
||||
void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
|
||||
struct efx_tc_flow_rule *rule);
|
||||
|
||||
int efx_tc_insert_rep_filters(struct efx_nic *efx);
|
||||
void efx_tc_remove_rep_filters(struct efx_nic *efx);
|
||||
|
||||
int efx_init_tc(struct efx_nic *efx);
|
||||
void efx_fini_tc(struct efx_nic *efx);
|
||||
|
||||
int efx_init_struct_tc(struct efx_nic *efx);
|
||||
void efx_fini_struct_tc(struct efx_nic *efx);
|
||||
|
||||
#endif /* EFX_TC_H */
|