Merge branch 'mlx5-flow-steering'

Saeed Mahameed says:

====================
mlx5 improved flow steering management

The first two patches fix some minor issues in the recently
introduced SRIOV code.

The other seven patches modify the driver's code that
manages flow steering rules with ConnectX-4 devices.

Basic introduction:

The flow steering device specification model is composed of the following entities:

Destination (either a TIR/Flow table/vport), where TIR is RSS end-point, vport
is the VF eSwitch port in SRIOV.

Flow table entry (FTE) - the values used by the flow specification
Flow table group (FG) - the masks used by the flow specification
Flow table (FT) - groups several FGs and can serve as destination

The flow steering software entities:

In addition to the device objects, the software has two more objects:

Priorities - group several FTs. Handles order of packet matching.

Namespaces - group several priorities. Namespaces are used in order to
isolate different usages of steering (for example, add two separate
namespaces, one for the NIC driver and one for the E-Switch FDB).

The base data structure for the flow steering management is a tree and
all the flow steering objects such as (Namespace/Flow table/Flow Group/FTE/etc.)
are represented as a node in the tree, e.g.:
Priority-0 -> FT1 -> FG -> FTE -> TIR (destination)
Priority-1 -> FT2 -> FG -> FTE -> TIR (destination)

Matching begins in FT1 flow rules and if there is a miss on all the FTEs
then matching continues on the FTEs in FT2.

The new implementation solves/improves the following
issues in the current code:

1) The new impl. supports multiple destinations, the search for existing rule with
   the same matching value is performed by the flow steering management.
   In the current impl. the E-switch FDB management code needs to search
   for existing rules before calling to the add rule function.

2) The new impl. manages the flow table level, in the current implementation the
   consumer states the flow table level when new flow table is created without
   any knowledge about the levels of other flow tables.

3) In the current impl. the consumer can't create or destroy flow
   groups dynamically, the flow groups are passed as argument to the create
   flow table API. The new impl. exposes API for create/destroy flow group.

The series is built as follows:

Patch #1 adds the flow steering API firmware commands.

Patch #2 adds the tree operations of the flow steering tree: add/remove node,
initialize node and take reference count on a node.

Patch #3 adds essential algorithms for managing the flow steering.

Patch #4 Initialize the flow steering tree, flow steering initialization is based
on static tree which illustrates the flow steering tree when the driver is loaded.

Patch #5 is the main patch of the series. It introduces the flow steering API.

Patch #6 Expose the new flow steering API and remove the old one.
The Ethernet flow steering follows the existing implementation,
but uses the new steering API.

Patch #7 Rename en_flow_table.c to en_fs.c in order to be aligned with
the new flow steering files.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2015-12-12 00:15:25 -05:00
commit 9ad321b0b6
17 changed files with 2176 additions and 1042 deletions

View File

@ -2,7 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o eswitch.o \ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \ en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
en_txrx.o en_txrx.o

View File

@ -64,6 +64,8 @@
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_BF_BUDGET 16 #define MLX5E_SQ_BF_BUDGET 16
#define MLX5E_NUM_MAIN_GROUPS 9
static const char vport_strings[][ETH_GSTRING_LEN] = { static const char vport_strings[][ETH_GSTRING_LEN] = {
/* vport statistics */ /* vport statistics */
"rx_packets", "rx_packets",
@ -442,7 +444,7 @@ enum mlx5e_rqt_ix {
struct mlx5e_eth_addr_info { struct mlx5e_eth_addr_info {
u8 addr[ETH_ALEN + 2]; u8 addr[ETH_ALEN + 2];
u32 tt_vec; u32 tt_vec;
u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */ struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
}; };
#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE) #define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
@ -466,15 +468,22 @@ enum {
struct mlx5e_vlan_db { struct mlx5e_vlan_db {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 active_vlans_ft_ix[VLAN_N_VID]; struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID];
u32 untagged_rule_ft_ix; struct mlx5_flow_rule *untagged_rule;
u32 any_vlan_rule_ft_ix; struct mlx5_flow_rule *any_vlan_rule;
bool filter_disabled; bool filter_disabled;
}; };
struct mlx5e_flow_table { struct mlx5e_flow_table {
void *vlan; int num_groups;
void *main; struct mlx5_flow_table *t;
struct mlx5_flow_group **g;
};
struct mlx5e_flow_tables {
struct mlx5_flow_namespace *ns;
struct mlx5e_flow_table vlan;
struct mlx5e_flow_table main;
}; };
struct mlx5e_priv { struct mlx5e_priv {
@ -497,7 +506,7 @@ struct mlx5e_priv {
u32 rqtn[MLX5E_NUM_RQT]; u32 rqtn[MLX5E_NUM_RQT];
u32 tirn[MLX5E_NUM_TT]; u32 tirn[MLX5E_NUM_TT];
struct mlx5e_flow_table ft; struct mlx5e_flow_tables fts;
struct mlx5e_eth_addr_db eth_addr; struct mlx5e_eth_addr_db eth_addr;
struct mlx5e_vlan_db vlan; struct mlx5e_vlan_db vlan;

View File

@ -30,7 +30,7 @@
* SOFTWARE. * SOFTWARE.
*/ */
#include <linux/mlx5/flow_table.h> #include <linux/mlx5/fs.h>
#include "en.h" #include "en.h"
#include "eswitch.h" #include "eswitch.h"
@ -2103,6 +2103,11 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_priv *priv = netdev_priv(netdev);
mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr); mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
if (is_zero_ether_addr(netdev->dev_addr) &&
!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
eth_hw_addr_random(netdev);
mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
}
} }
static void mlx5e_build_netdev(struct net_device *netdev) static void mlx5e_build_netdev(struct net_device *netdev)

View File

@ -34,7 +34,7 @@
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h> #include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h> #include <linux/mlx5/vport.h>
#include <linux/mlx5/flow_table.h> #include <linux/mlx5/fs.h>
#include "mlx5_core.h" #include "mlx5_core.h"
#include "eswitch.h" #include "eswitch.h"
@ -321,220 +321,6 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
free_l2_table_index(l2_table, index); free_l2_table_index(l2_table, index);
} }
/* E-Switch FDB flow steering */
struct dest_node {
struct list_head list;
struct mlx5_flow_destination dest;
};
static int _mlx5_flow_rule_apply(struct mlx5_flow_rule *fr)
{
bool was_valid = fr->valid;
struct dest_node *dest_n;
u32 dest_list_size = 0;
void *in_match_value;
u32 *flow_context;
u32 flow_index;
int err;
int i;
if (list_empty(&fr->dest_list)) {
if (fr->valid)
mlx5_del_flow_table_entry(fr->ft, fr->fi);
fr->valid = false;
return 0;
}
list_for_each_entry(dest_n, &fr->dest_list, list)
dest_list_size++;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
MLX5_ST_SZ_BYTES(dest_format_struct) *
dest_list_size);
if (!flow_context)
return -ENOMEM;
MLX5_SET(flow_context, flow_context, flow_tag, fr->flow_tag);
MLX5_SET(flow_context, flow_context, action, fr->action);
MLX5_SET(flow_context, flow_context, destination_list_size,
dest_list_size);
i = 0;
list_for_each_entry(dest_n, &fr->dest_list, list) {
void *dest_addr = MLX5_ADDR_OF(flow_context, flow_context,
destination[i++]);
MLX5_SET(dest_format_struct, dest_addr, destination_type,
dest_n->dest.type);
MLX5_SET(dest_format_struct, dest_addr, destination_id,
dest_n->dest.vport_num);
}
in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
memcpy(in_match_value, fr->match_value, MLX5_ST_SZ_BYTES(fte_match_param));
err = mlx5_add_flow_table_entry(fr->ft, fr->match_criteria_enable,
fr->match_criteria, flow_context,
&flow_index);
if (!err) {
if (was_valid)
mlx5_del_flow_table_entry(fr->ft, fr->fi);
fr->fi = flow_index;
fr->valid = true;
}
kfree(flow_context);
return err;
}
static int mlx5_flow_rule_add_dest(struct mlx5_flow_rule *fr,
struct mlx5_flow_destination *new_dest)
{
struct dest_node *dest_n;
int err;
dest_n = kzalloc(sizeof(*dest_n), GFP_KERNEL);
if (!dest_n)
return -ENOMEM;
memcpy(&dest_n->dest, new_dest, sizeof(dest_n->dest));
mutex_lock(&fr->mutex);
list_add(&dest_n->list, &fr->dest_list);
err = _mlx5_flow_rule_apply(fr);
if (err) {
list_del(&dest_n->list);
kfree(dest_n);
}
mutex_unlock(&fr->mutex);
return err;
}
static int mlx5_flow_rule_del_dest(struct mlx5_flow_rule *fr,
struct mlx5_flow_destination *dest)
{
struct dest_node *dest_n;
struct dest_node *n;
int err;
mutex_lock(&fr->mutex);
list_for_each_entry_safe(dest_n, n, &fr->dest_list, list) {
if (dest->vport_num == dest_n->dest.vport_num)
goto found;
}
mutex_unlock(&fr->mutex);
return -ENOENT;
found:
list_del(&dest_n->list);
err = _mlx5_flow_rule_apply(fr);
mutex_unlock(&fr->mutex);
kfree(dest_n);
return err;
}
static struct mlx5_flow_rule *find_fr(struct mlx5_eswitch *esw,
u8 match_criteria_enable,
u32 *match_value)
{
struct hlist_head *hash = esw->mc_table;
struct esw_mc_addr *esw_mc;
u8 *dmac_v;
dmac_v = MLX5_ADDR_OF(fte_match_param, match_value,
outer_headers.dmac_47_16);
/* UNICAST FULL MATCH */
if (!is_multicast_ether_addr(dmac_v))
return NULL;
/* MULTICAST FULL MATCH */
esw_mc = l2addr_hash_find(hash, dmac_v, struct esw_mc_addr);
return esw_mc ? esw_mc->uplink_rule : NULL;
}
static struct mlx5_flow_rule *alloc_fr(void *ft,
u8 match_criteria_enable,
u32 *match_criteria,
u32 *match_value,
u32 action,
u32 flow_tag)
{
struct mlx5_flow_rule *fr = kzalloc(sizeof(*fr), GFP_KERNEL);
if (!fr)
return NULL;
fr->match_criteria = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
fr->match_value = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
if (!fr->match_criteria || !fr->match_value) {
kfree(fr->match_criteria);
kfree(fr->match_value);
kfree(fr);
return NULL;
}
memcpy(fr->match_criteria, match_criteria, MLX5_ST_SZ_BYTES(fte_match_param));
memcpy(fr->match_value, match_value, MLX5_ST_SZ_BYTES(fte_match_param));
fr->match_criteria_enable = match_criteria_enable;
fr->flow_tag = flow_tag;
fr->action = action;
mutex_init(&fr->mutex);
INIT_LIST_HEAD(&fr->dest_list);
atomic_set(&fr->refcount, 0);
fr->ft = ft;
return fr;
}
static void deref_fr(struct mlx5_flow_rule *fr)
{
if (!atomic_dec_and_test(&fr->refcount))
return;
kfree(fr->match_criteria);
kfree(fr->match_value);
kfree(fr);
}
static struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_eswitch *esw,
u8 match_criteria_enable,
u32 *match_criteria,
u32 *match_value,
u32 action,
u32 flow_tag,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_rule *fr;
int err;
fr = find_fr(esw, match_criteria_enable, match_value);
fr = fr ? fr : alloc_fr(esw->fdb_table.fdb, match_criteria_enable, match_criteria,
match_value, action, flow_tag);
if (!fr)
return NULL;
atomic_inc(&fr->refcount);
err = mlx5_flow_rule_add_dest(fr, dest);
if (err) {
deref_fr(fr);
return NULL;
}
return fr;
}
static void mlx5_del_flow_rule(struct mlx5_flow_rule *fr, u32 vport)
{
struct mlx5_flow_destination dest;
dest.vport_num = vport;
mlx5_flow_rule_del_dest(fr, &dest);
deref_fr(fr);
}
/* E-Switch FDB */ /* E-Switch FDB */
static struct mlx5_flow_rule * static struct mlx5_flow_rule *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
@ -569,7 +355,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
"\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n", "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
dmac_v, dmac_c, vport); dmac_v, dmac_c, vport);
flow_rule = flow_rule =
mlx5_add_flow_rule(esw, mlx5_add_flow_rule(esw->fdb_table.fdb,
match_header, match_header,
match_c, match_c,
match_v, match_v,
@ -589,33 +375,61 @@ out:
static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
{ {
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev; struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_table_group g; struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb; struct mlx5_flow_table *fdb;
struct mlx5_flow_group *g;
void *match_criteria;
int table_size;
u32 *flow_group_in;
u8 *dmac; u8 *dmac;
int err = 0;
esw_debug(dev, "Create FDB log_max_size(%d)\n", esw_debug(dev, "Create FDB log_max_size(%d)\n",
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
memset(&g, 0, sizeof(g)); root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
/* UC MC Full match rules*/ if (!root_ns) {
g.log_sz = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size); esw_warn(dev, "Failed to get FDB flow namespace\n");
g.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; return -ENOMEM;
dmac = MLX5_ADDR_OF(fte_match_param, g.match_criteria, }
outer_headers.dmac_47_16);
/* Match criteria mask */
memset(dmac, 0xff, 6);
fdb = mlx5_create_flow_table(dev, 0, flow_group_in = mlx5_vzalloc(inlen);
MLX5_FLOW_TABLE_TYPE_ESWITCH, if (!flow_group_in)
1, &g); return -ENOMEM;
if (fdb) memset(flow_group_in, 0, inlen);
esw_debug(dev, "ESW: FDB Table created fdb->id %d\n", mlx5_get_flow_table_id(fdb));
else
esw_warn(dev, "ESW: Failed to create FDB Table\n");
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
fdb = mlx5_create_flow_table(root_ns, 0, table_size);
if (IS_ERR_OR_NULL(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
goto out;
}
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_OUTER_HEADERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
eth_broadcast_addr(dmac);
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR_OR_NULL(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create flow group err(%d)\n", err);
goto out;
}
esw->fdb_table.addr_grp = g;
esw->fdb_table.fdb = fdb; esw->fdb_table.fdb = fdb;
return fdb ? 0 : -ENOMEM; out:
kfree(flow_group_in);
if (err && !IS_ERR_OR_NULL(fdb))
mlx5_destroy_flow_table(fdb);
return err;
} }
static void esw_destroy_fdb_table(struct mlx5_eswitch *esw) static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
@ -623,10 +437,11 @@ static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
if (!esw->fdb_table.fdb) if (!esw->fdb_table.fdb)
return; return;
esw_debug(esw->dev, "Destroy FDB Table fdb(%d)\n", esw_debug(esw->dev, "Destroy FDB Table\n");
mlx5_get_flow_table_id(esw->fdb_table.fdb)); mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
mlx5_destroy_flow_table(esw->fdb_table.fdb); mlx5_destroy_flow_table(esw->fdb_table.fdb);
esw->fdb_table.fdb = NULL; esw->fdb_table.fdb = NULL;
esw->fdb_table.addr_grp = NULL;
} }
/* E-Switch vport UC/MC lists management */ /* E-Switch vport UC/MC lists management */
@ -689,7 +504,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
del_l2_table_entry(esw->dev, esw_uc->table_index); del_l2_table_entry(esw->dev, esw_uc->table_index);
if (vaddr->flow_rule) if (vaddr->flow_rule)
mlx5_del_flow_rule(vaddr->flow_rule, vport); mlx5_del_flow_rule(vaddr->flow_rule);
vaddr->flow_rule = NULL; vaddr->flow_rule = NULL;
l2addr_hash_del(esw_uc); l2addr_hash_del(esw_uc);
@ -750,14 +565,14 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
esw_mc->uplink_rule); esw_mc->uplink_rule);
if (vaddr->flow_rule) if (vaddr->flow_rule)
mlx5_del_flow_rule(vaddr->flow_rule, vport); mlx5_del_flow_rule(vaddr->flow_rule);
vaddr->flow_rule = NULL; vaddr->flow_rule = NULL;
if (--esw_mc->refcnt) if (--esw_mc->refcnt)
return 0; return 0;
if (esw_mc->uplink_rule) if (esw_mc->uplink_rule)
mlx5_del_flow_rule(esw_mc->uplink_rule, UPLINK_VPORT); mlx5_del_flow_rule(esw_mc->uplink_rule);
l2addr_hash_del(esw_mc); l2addr_hash_del(esw_mc);
return 0; return 0;

View File

@ -88,20 +88,6 @@ struct l2addr_node {
kfree(ptr); \ kfree(ptr); \
}) })
struct mlx5_flow_rule {
void *ft;
u32 fi;
u8 match_criteria_enable;
u32 *match_criteria;
u32 *match_value;
u32 action;
u32 flow_tag;
bool valid;
atomic_t refcount;
struct mutex mutex; /* protect flow rule updates */
struct list_head dest_list;
};
struct mlx5_vport { struct mlx5_vport {
struct mlx5_core_dev *dev; struct mlx5_core_dev *dev;
int vport; int vport;
@ -126,6 +112,7 @@ struct mlx5_l2_table {
struct mlx5_eswitch_fdb { struct mlx5_eswitch_fdb {
void *fdb; void *fdb;
struct mlx5_flow_group *addr_grp;
}; };
struct mlx5_eswitch { struct mlx5_eswitch {

View File

@ -1,422 +0,0 @@
/*
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/export.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/flow_table.h>
#include "mlx5_core.h"
/* Software descriptor of one flow group within a flow table. */
struct mlx5_ftg {
struct mlx5_flow_table_group g; /* caller-supplied spec: mask + log_sz */
u32 id; /* group id returned by CREATE_FLOW_GROUP */
u32 start_ix; /* first flow index owned by this group */
};
/* Software state of a device flow table: per-group layout plus a bitmap
 * allocator handing out flow entry indices.
 */
struct mlx5_flow_table {
struct mlx5_core_dev *dev;
u8 level;
u8 type;
u32 id; /* table id returned by CREATE_FLOW_TABLE */
struct mutex mutex; /* sync bitmap alloc */
u16 num_groups;
struct mlx5_ftg *group; /* array of num_groups group descriptors */
unsigned long *bitmap; /* one bit per flow index; set = in use */
u32 size; /* total flow entries = sum of all group sizes */
};
/* Issue SET_FLOW_TABLE_ENTRY for @flow_index. @flow_context (including the
 * destination list that trails it) is copied into the command inbox and the
 * firmware id of group @group_ix is patched in. Returns 0 or negative errno.
 */
static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
u32 flow_index, void *flow_context)
{
u32 out[MLX5_ST_SZ_DW(set_fte_out)];
u32 *in;
void *in_flow_context;
/* byte size of the trailing destination array */
int fcdls =
MLX5_GET(flow_context, flow_context, destination_list_size) *
MLX5_ST_SZ_BYTES(dest_format_struct);
int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
int err;
in = mlx5_vzalloc(inlen);
if (!in) {
mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
return -ENOMEM;
}
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, flow_index);
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
memcpy(in_flow_context, flow_context,
MLX5_ST_SZ_BYTES(flow_context) + fcdls);
/* the group id is known only here, not by the caller */
MLX5_SET(flow_context, in_flow_context, group_id,
ft->group[group_ix].id);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
sizeof(out));
kvfree(in);
return err;
}
/* Issue DELETE_FLOW_TABLE_ENTRY for @flow_index. Firmware status is
 * checked by mlx5_cmd_exec_check_status but not propagated to the caller.
 */
static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
{
u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
MLX5_SET_DFTEI(in, table_type, ft->type);
MLX5_SET_DFTEI(in, table_id, ft->id);
MLX5_SET_DFTEI(in, flow_index, flow_index);
MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
}
/* Issue DESTROY_FLOW_GROUP for group index @i of @ft (best effort,
 * return status is not propagated).
 */
static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
MLX5_SET_DFGI(in, table_type, ft->type);
MLX5_SET_DFGI(in, table_id, ft->id);
MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
MLX5_SET_DFGI(in, group_id, ft->group[i].id);
mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
}
/* Issue CREATE_FLOW_GROUP for group index @i of @ft. The group covers the
 * index range [start_ix, start_ix + 2^log_sz - 1] precomputed at table
 * creation. On success the firmware-assigned group id is stored in
 * ft->group[i].id. Returns 0 or negative errno.
 */
static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
{
u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
u32 *in;
void *in_match_criteria;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_group *g = &ft->group[i].g;
u32 start_ix = ft->group[i].start_ix;
u32 end_ix = start_ix + (1 << g->log_sz) - 1;
int err;
in = mlx5_vzalloc(inlen);
if (!in) {
mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
return -ENOMEM;
}
in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
match_criteria);
memset(out, 0, sizeof(out));
#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
MLX5_SET_CFGI(in, table_type, ft->type);
MLX5_SET_CFGI(in, table_id, ft->id);
MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET_CFGI(in, start_flow_index, start_ix);
MLX5_SET_CFGI(in, end_flow_index, end_ix);
MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
memcpy(in_match_criteria, g->match_criteria,
MLX5_ST_SZ_BYTES(fte_match_param));
err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
sizeof(out));
if (!err)
ft->group[i].id = MLX5_GET(create_flow_group_out, out,
group_id);
kvfree(in);
return err;
}
/* Destroy every flow group of @ft in firmware. */
static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
{
int i;
for (i = 0; i < ft->num_groups; i++)
mlx5_destroy_flow_group_cmd(ft, i);
}
/* Create all flow groups of @ft in firmware. On failure, groups already
 * created are destroyed in reverse order and the error is returned.
 */
static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
{
int err;
int i;
for (i = 0; i < ft->num_groups; i++) {
err = mlx5_create_flow_group_cmd(ft, i);
if (err)
goto err_destroy_flow_table_groups;
}
return 0;
err_destroy_flow_table_groups:
/* unwind: destroy only the groups created so far */
for (i--; i >= 0; i--)
mlx5_destroy_flow_group_cmd(ft, i);
return err;
}
/* Issue CREATE_FLOW_TABLE from ft->{type,level,size}. On success the
 * firmware table id is stored in ft->id. Returns 0 or negative errno.
 */
static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
{
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
int err;
memset(in, 0, sizeof(in));
MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, level, ft->level);
/* firmware takes a log2 size; round the entry count up */
MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size));
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
sizeof(out));
if (err)
return err;
ft->id = MLX5_GET(create_flow_table_out, out, table_id);
return 0;
}
/* Issue DESTROY_FLOW_TABLE for @ft (best effort, status not propagated). */
static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
MLX5_SET_DFTI(in, table_type, ft->type);
MLX5_SET_DFTI(in, table_id, ft->id);
MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
}
/* Find the group of @ft whose match criteria exactly equal the requested
 * @match_criteria for every enabled criteria set (outer/misc/inner).
 * On success *group_ix is set and 0 returned; -EINVAL if no group matches.
 */
static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
u32 *match_criteria, int *group_ix)
{
void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
outer_headers);
void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
misc_parameters);
void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
inner_headers);
int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc);
int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
int i;
for (i = 0; i < ft->num_groups; i++) {
struct mlx5_flow_table_group *g = &ft->group[i].g;
void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
g->match_criteria,
outer_headers);
void *gmc_misc = MLX5_ADDR_OF(fte_match_param,
g->match_criteria,
misc_parameters);
void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
g->match_criteria,
inner_headers);
/* the enable bitmaps must be identical, not merely a subset */
if (g->match_criteria_enable != match_criteria_enable)
continue;
if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
continue;
if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
continue;
if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
continue;
*group_ix = i;
return 0;
}
return -EINVAL;
}
/* Allocate a free flow index within group @group_ix of @ft, under
 * ft->mutex. Returns 0 and stores the index in *ix, or -ENOSPC when the
 * group's index range is exhausted.
 */
static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
{
struct mlx5_ftg *g = &ft->group[group_ix];
int err = 0;
mutex_lock(&ft->mutex);
*ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
/* a zero bit past the group's end belongs to a later group */
if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
err = -ENOSPC;
else
__set_bit(*ix, ft->bitmap);
mutex_unlock(&ft->mutex);
return err;
}
/* Return flow index @ix to the table's bitmap allocator.
 * NOTE(review): clears the bit without taking ft->mutex, unlike the
 * allocation path — presumably relies on caller serialization; confirm.
 */
static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
{
__clear_bit(ix, ft->bitmap);
}
/* Public API: add a flow entry. Locates the group matching
 * @match_criteria, allocates a flow index in it, and programs the FTE via
 * firmware. On success *flow_index holds the new index; returns 0 or a
 * negative errno.
 * NOTE(review): if mlx5_set_flow_entry_cmd fails, the index allocated by
 * alloc_flow_index is not released — looks like a leak; confirm.
 */
int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
void *match_criteria, void *flow_context,
u32 *flow_index)
{
struct mlx5_flow_table *ft = flow_table;
int group_ix;
int err;
err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
&group_ix);
if (err) {
mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
return err;
}
err = alloc_flow_index(ft, group_ix, flow_index);
if (err) {
mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
return err;
}
return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
}
EXPORT_SYMBOL(mlx5_add_flow_table_entry);
/* Public API: delete the flow entry at @flow_index and return the index
 * to the table's allocator.
 */
void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
{
struct mlx5_flow_table *ft = flow_table;
mlx5_del_flow_entry_cmd(ft, flow_index);
mlx5_free_flow_index(ft, flow_index);
}
EXPORT_SYMBOL(mlx5_del_flow_table_entry);
/* Public API: create a flow table of @table_type at @level with
 * @num_groups groups described by @group. Allocates software state, lays
 * out each group's index range, then creates the table and its groups in
 * firmware. Returns an opaque table handle, or NULL on any failure.
 */
void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
u16 num_groups,
struct mlx5_flow_table_group *group)
{
struct mlx5_flow_table *ft;
u32 start_ix = 0;
u32 ft_size = 0;
void *gr;
void *bm;
int err;
int i;
/* total entries = sum of all group sizes */
for (i = 0; i < num_groups; i++)
ft_size += (1 << group[i].log_sz);
ft = kzalloc(sizeof(*ft), GFP_KERNEL);
gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
if (!ft || !gr || !bm)
goto err_free_ft;
ft->group = gr;
ft->bitmap = bm;
ft->num_groups = num_groups;
ft->level = level;
ft->type = table_type;
ft->size = ft_size;
ft->dev = dev;
mutex_init(&ft->mutex);
/* assign each group a contiguous index range */
for (i = 0; i < ft->num_groups; i++) {
memcpy(&ft->group[i].g, &group[i], sizeof(*group));
ft->group[i].start_ix = start_ix;
start_ix += 1 << group[i].log_sz;
}
err = mlx5_create_flow_table_cmd(ft);
if (err)
goto err_free_ft;
err = mlx5_create_flow_table_groups(ft);
if (err)
goto err_destroy_flow_table_cmd;
return ft;
err_destroy_flow_table_cmd:
mlx5_destroy_flow_table_cmd(ft);
err_free_ft:
mlx5_core_warn(dev, "failed to alloc flow table\n");
kfree(bm);
kfree(gr);
kfree(ft);
return NULL;
}
EXPORT_SYMBOL(mlx5_create_flow_table);
/* Public API: tear down a table created by mlx5_create_flow_table —
 * destroy its groups and the table in firmware, then free software state.
 */
void mlx5_destroy_flow_table(void *flow_table)
{
struct mlx5_flow_table *ft = flow_table;
mlx5_destroy_flow_table_groups(ft);
mlx5_destroy_flow_table_cmd(ft);
kfree(ft->bitmap);
kfree(ft->group);
kfree(ft);
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);
/* Public API: return the firmware-assigned id of @flow_table. */
u32 mlx5_get_flow_table_id(void *flow_table)
{
struct mlx5_flow_table *ft = flow_table;
return ft->id;
}
EXPORT_SYMBOL(mlx5_get_flow_table_id);

View File

@ -0,0 +1,239 @@
/*
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"
/* Firmware command wrapper: CREATE_FLOW_TABLE of @type at @level with
 * 2^@log_size entries. On success *table_id receives the new table id.
 * Returns 0 or negative errno.
 */
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, unsigned int *table_id)
{
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
int err;
memset(in, 0, sizeof(in));
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, type);
MLX5_SET(create_flow_table_in, in, level, level);
MLX5_SET(create_flow_table_in, in, log_size, log_size);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
if (!err)
*table_id = MLX5_GET(create_flow_table_out, out,
table_id);
return err;
}
/* Firmware command wrapper: DESTROY_FLOW_TABLE for @ft.
 * Returns 0 or negative errno.
 */
int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
}
/* Firmware command wrapper: CREATE_FLOW_GROUP in table @ft. @in is a
 * caller-prepared create_flow_group_in mailbox (match criteria, index
 * range); this function fills in the opcode and table identity. On success
 * *group_id receives the new group id. Returns 0 or negative errno.
 */
int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
u32 *in,
unsigned int *group_id)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
int err;
memset(out, 0, sizeof(out));
MLX5_SET(create_flow_group_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, ft->type);
MLX5_SET(create_flow_group_in, in, table_id, ft->id);
err = mlx5_cmd_exec_check_status(dev, in,
inlen, out,
sizeof(out));
if (!err)
*group_id = MLX5_GET(create_flow_group_out, out,
group_id);
return err;
}
/* Firmware command wrapper: DESTROY_FLOW_GROUP @group_id in table @ft.
 * Returns 0 or negative errno.
 */
int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned int group_id)
{
u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(destroy_flow_group_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
}
/* Build and execute a SET_FLOW_TABLE_ENTRY command for @fte.
 * @opmod:       0 = create a new entry, 1 = modify an existing one.
 * @modify_mask: modify_enable_mask bits (which FTE fields to update);
 *               only meaningful when @opmod is 1.
 * The inbox is variable-length: the fixed set_fte_in header followed by
 * one dest_format_struct per destination hanging off the FTE node.
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
int opmod, int modify_mask,
struct mlx5_flow_table *ft,
unsigned group_id,
struct fs_fte *fte)
{
/* Inbox size depends on the number of destinations of this FTE. */
unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
u32 out[MLX5_ST_SZ_DW(set_fte_out)];
struct mlx5_flow_rule *dst;
void *in_flow_context;
void *in_match_value;
void *in_dests;
u32 *in;
int err;
/* vzalloc: inbox may exceed a page for large destination lists. */
in = mlx5_vzalloc(inlen);
if (!in) {
mlx5_core_warn(dev, "failed to allocate inbox\n");
return -ENOMEM;
}
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
MLX5_SET(set_fte_in, in, op_mod, opmod);
MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
/* Fill the embedded flow_context: group, tag, action and match value. */
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action);
MLX5_SET(flow_context, in_flow_context, destination_list_size,
fte->dests_size);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
/* Append one dest_format_struct per destination rule; in_dests walks
 * the trailing variable-length array of the flow context.
 */
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
list_for_each_entry(dst, &fte->node.children, node.list) {
unsigned int id;
MLX5_SET(dest_format_struct, in_dests, destination_type,
dst->dest_attr.type);
/* Flow-table destinations carry a table id; TIR and vport
 * destinations share the tir_num union slot.
 */
if (dst->dest_attr.type ==
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
id = dst->dest_attr.ft->id;
else
id = dst->dest_attr.tir_num;
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
}
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
sizeof(out));
kvfree(in);
return err;
}
/* Create a new flow table entry: SET_FLOW_TABLE_ENTRY with op_mod 0
 * and no modify mask.
 */
int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned group_id,
			struct fs_fte *fte)
{
	return mlx5_cmd_set_fte(dev, 0 /* create */, 0 /* no mask */,
				ft, group_id, fte);
}
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned group_id,
struct fs_fte *fte)
{
int opmod;
int modify_mask;
int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.
flow_modify_en);
if (!atomic_mod_cap)
return -ENOTSUPP;
opmod = 1;
modify_mask = 1 <<
MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST;
return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
}
/* Issue DELETE_FLOW_TABLE_ENTRY for the entry at @index in table @ft.
 * Returns the command status as errno.
 *
 * Cleanup vs original: the redundant local 'err' (assigned then
 * immediately returned) is dropped and the mailboxes are zero-initialized
 * at declaration instead of via memset.
 */
int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned int index)
{
	u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};

	MLX5_SET(delete_fte_in, in, opcode,
		 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, index);

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU * licenses. You may choose to be licensed under the terms of the GNU
@ -30,34 +30,36 @@
* SOFTWARE. * SOFTWARE.
*/ */
#ifndef MLX5_FLOW_TABLE_H #ifndef _MLX5_FS_CMD_
#define MLX5_FLOW_TABLE_H #define _MLX5_FS_CMD_
#include <linux/mlx5/driver.h> int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, unsigned int *table_id);
struct mlx5_flow_table_group { int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
u8 log_sz; struct mlx5_flow_table *ft);
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
};
struct mlx5_flow_destination { int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
enum mlx5_flow_destination_type type; struct mlx5_flow_table *ft,
union { u32 *in, unsigned int *group_id);
u32 tir_num;
void *ft;
u32 vport_num;
};
};
void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type, int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
u16 num_groups, struct mlx5_flow_table *ft,
struct mlx5_flow_table_group *group); unsigned int group_id);
void mlx5_destroy_flow_table(void *flow_table);
int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
void *match_criteria, void *flow_context,
u32 *flow_index);
void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
u32 mlx5_get_flow_table_id(void *flow_table);
#endif /* MLX5_FLOW_TABLE_H */ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned group_id,
struct fs_fte *fte);
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned group_id,
struct fs_fte *fte);
int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned int index);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,155 @@
/*
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _MLX5_FS_CORE_
#define _MLX5_FS_CORE_
#include <linux/mlx5/fs.h>
/* Node kinds in the flow steering tree; every steering object embeds
 * a struct fs_node tagged with one of these.
 */
enum fs_node_type {
FS_TYPE_NAMESPACE,
FS_TYPE_PRIO,
FS_TYPE_FLOW_TABLE,
FS_TYPE_FLOW_GROUP,
FS_TYPE_FLOW_ENTRY,
FS_TYPE_FLOW_DEST
};
/* Device flow table types; values match the firmware table_type field. */
enum fs_flow_table_type {
FS_FT_NIC_RX = 0x0,
FS_FT_FDB = 0X4,
};
/* FTE lifecycle flags (bitmask). */
enum fs_fte_status {
FS_FTE_STATUS_EXISTING = 1UL << 0,
};
/* Base tree node embedded in every flow steering object; gives each
 * object a parent link, a list of children, a per-node lock and a
 * refcount-driven remove callback.
 */
struct fs_node {
struct list_head list;
struct list_head children;
enum fs_node_type type;
struct fs_node *parent;
struct fs_node *root;
/* lock the node for writing and traversing */
struct mutex lock;
atomic_t refcount;
void (*remove_func)(struct fs_node *);
};
/* One destination of an FTE; child node of a struct fs_fte. */
struct mlx5_flow_rule {
struct fs_node node;
struct mlx5_flow_destination dest_attr;
};
/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
struct fs_node node;
u32 id;
unsigned int max_fte;
unsigned int level;
enum fs_flow_table_type type;
};
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
u32 val[MLX5_ST_SZ_DW(fte_match_param)];
u32 dests_size;
u32 flow_tag;
u32 index;
u32 action;
enum fs_fte_status status;
};
/* Type of children is mlx5_flow_table/namespace */
struct fs_prio {
struct fs_node node;
unsigned int max_ft;
unsigned int start_level;
unsigned int prio;
unsigned int num_ft;
};
/* Type of children is fs_prio */
struct mlx5_flow_namespace {
/* parent == NULL => root ns */
struct fs_node node;
};
/* Match criteria mask shared by all FTEs of one flow group. */
struct mlx5_flow_group_mask {
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
};
/* Type of children is fs_fte */
struct mlx5_flow_group {
struct fs_node node;
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
u32 num_ftes;
u32 id;
};
/* Top of one steering tree (e.g. NIC RX or FDB); binds the namespace
 * hierarchy to a device and a firmware table type.
 */
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
enum fs_flow_table_type table_type;
struct mlx5_core_dev *dev;
};
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
/* Recover the containing steering object from its embedded fs_node. */
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
/* Iteration helpers over a node's children, typed per tree level. */
#define fs_list_for_each_entry(pos, root) \
list_for_each_entry(pos, root, node.list)
#define fs_for_each_ns_or_ft_reverse(pos, prio) \
list_for_each_entry_reverse(pos, &(prio)->node.children, list)
#define fs_for_each_ns_or_ft(pos, prio) \
list_for_each_entry(pos, (&(prio)->node.children), list)
#define fs_for_each_prio(pos, ns) \
fs_list_for_each_entry(pos, &(ns)->node.children)
#define fs_for_each_fg(pos, ft) \
fs_list_for_each_entry(pos, &(ft)->node.children)
#define fs_for_each_fte(pos, fg) \
fs_list_for_each_entry(pos, &(fg)->node.children)
#define fs_for_each_dst(pos, fte) \
fs_list_for_each_entry(pos, &(fte)->node.children)
#endif

View File

@ -173,7 +173,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err; return err;
} }
if (MLX5_CAP_GEN(dev, vport_group_manager)) { if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH, err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
HCA_CAP_OPMOD_GET_CUR); HCA_CAP_OPMOD_GET_CUR);
if (err) if (err)

View File

@ -49,6 +49,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/mlx5/mlx5_ifc.h> #include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h" #include "mlx5_core.h"
#include "fs_core.h"
#ifdef CONFIG_MLX5_CORE_EN #ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h" #include "eswitch.h"
#endif #endif
@ -1055,6 +1056,11 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_init_srq_table(dev); mlx5_init_srq_table(dev);
mlx5_init_mr_table(dev); mlx5_init_mr_table(dev);
err = mlx5_init_fs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init flow steering\n");
goto err_fs;
}
#ifdef CONFIG_MLX5_CORE_EN #ifdef CONFIG_MLX5_CORE_EN
err = mlx5_eswitch_init(dev); err = mlx5_eswitch_init(dev);
if (err) { if (err) {
@ -1093,6 +1099,8 @@ err_sriov:
mlx5_eswitch_cleanup(dev->priv.eswitch); mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif #endif
err_reg_dev: err_reg_dev:
mlx5_cleanup_fs(dev);
err_fs:
mlx5_cleanup_mr_table(dev); mlx5_cleanup_mr_table(dev);
mlx5_cleanup_srq_table(dev); mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev); mlx5_cleanup_qp_table(dev);
@ -1165,6 +1173,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_eswitch_cleanup(dev->priv.eswitch); mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif #endif
mlx5_cleanup_fs(dev);
mlx5_cleanup_mr_table(dev); mlx5_cleanup_mr_table(dev);
mlx5_cleanup_srq_table(dev); mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev); mlx5_cleanup_qp_table(dev);

View File

@ -65,6 +65,9 @@ do { \
(__dev)->priv.name, __func__, __LINE__, current->pid, \ (__dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__) ##__VA_ARGS__)
#define mlx5_core_info(__dev, format, ...) \
dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
enum { enum {
MLX5_CMD_DATA, /* print command payload only */ MLX5_CMD_DATA, /* print command payload only */
MLX5_CMD_TIME, /* print command execution time */ MLX5_CMD_TIME, /* print command execution time */

View File

@ -502,6 +502,8 @@ struct mlx5_priv {
struct mlx5_eswitch *eswitch; struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov; struct mlx5_core_sriov sriov;
unsigned long pci_dev_data; unsigned long pci_dev_data;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
}; };
enum mlx5_device_state { enum mlx5_device_state {

93
include/linux/mlx5/fs.h Normal file
View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _MLX5_FS_
#define _MLX5_FS_
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
/* Default (no-op) flow tag for rules that don't mark packets. */
#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
/* Consumer-visible namespaces: NIC driver steering vs E-Switch FDB. */
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_KERNEL,
MLX5_FLOW_NAMESPACE_FDB,
};
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_rule;
struct mlx5_flow_namespace;
/* Where matching packets go: a TIR, another flow table, or a vport. */
struct mlx5_flow_destination {
enum mlx5_flow_destination_type type;
union {
u32 tir_num;
struct mlx5_flow_table *ft;
u32 vport_num;
};
};
struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
/* inbox should be set with the following values:
 * start_flow_index
 * end_flow_index
 * match_criteria_enable
 * match_criteria
 */
struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
/* Single destination per rule.
 * Group ID is implied by the match criteria.
 */
struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_flow_table *ft,
u8 match_criteria_enable,
u32 *match_criteria,
u32 *match_value,
u32 action,
u32 flow_tag,
struct mlx5_flow_destination *dest);
void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
#endif

View File

@ -256,25 +256,27 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
struct mlx5_ifc_flow_table_prop_layout_bits { struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1]; u8 ft_support[0x1];
u8 reserved_0[0x1f]; u8 reserved_0[0x2];
u8 flow_modify_en[0x1];
u8 reserved_1[0x1c];
u8 reserved_1[0x2]; u8 reserved_2[0x2];
u8 log_max_ft_size[0x6]; u8 log_max_ft_size[0x6];
u8 reserved_2[0x10]; u8 reserved_3[0x10];
u8 max_ft_level[0x8]; u8 max_ft_level[0x8];
u8 reserved_3[0x20]; u8 reserved_4[0x20];
u8 reserved_4[0x18];
u8 log_max_ft_num[0x8];
u8 reserved_5[0x18]; u8 reserved_5[0x18];
u8 log_max_destination[0x8]; u8 log_max_ft_num[0x8];
u8 reserved_6[0x18]; u8 reserved_6[0x18];
u8 log_max_destination[0x8];
u8 reserved_7[0x18];
u8 log_max_flow[0x8]; u8 log_max_flow[0x8];
u8 reserved_7[0x40]; u8 reserved_8[0x40];
struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
@ -2843,6 +2845,13 @@ struct mlx5_ifc_set_hca_cap_in_bits {
union mlx5_ifc_hca_cap_union_bits capability; union mlx5_ifc_hca_cap_union_bits capability;
}; };
enum {
MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0,
MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1,
MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2,
MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3
};
struct mlx5_ifc_set_fte_out_bits { struct mlx5_ifc_set_fte_out_bits {
u8 status[0x8]; u8 status[0x8];
u8 reserved_0[0x18]; u8 reserved_0[0x18];
@ -2867,11 +2876,14 @@ struct mlx5_ifc_set_fte_in_bits {
u8 reserved_4[0x8]; u8 reserved_4[0x8];
u8 table_id[0x18]; u8 table_id[0x18];
u8 reserved_5[0x40]; u8 reserved_5[0x18];
u8 modify_enable_mask[0x8];
u8 reserved_6[0x20];
u8 flow_index[0x20]; u8 flow_index[0x20];
u8 reserved_6[0xe0]; u8 reserved_7[0xe0];
struct mlx5_ifc_flow_context_bits flow_context; struct mlx5_ifc_flow_context_bits flow_context;
}; };