mlx5-updates-2021-01-05
Merge tag 'mlx5-updates-2021-01-05' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-01-05

SW steering: refactor to have a device-specific STE layer below dr_ste

This series introduces some improvements and refactoring by adding a new
layer below dr_ste to allow support for different device STE formats.
It adds a struct of device-specific callbacks for the STE layer below
dr_ste. Each device implements its HW-specific functions, and the common
DR code accesses them through the new ste_ctx API.

The ConnectX-5-style steering format is called STE_v0. The next patch
series brings the ConnectX-6-style format, STE_v1.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
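To make the new layering concrete, below is a minimal sketch (not part of the patch itself) of the indirection the series introduces: common DR code resolves the device's STE context once per domain and then programs STEs only through wrappers that forward to the per-device callbacks. The callback and wrapper names mirror those that appear later in this diff; the bodies are illustrative only.

#include <linux/types.h>

/* Illustrative sketch -- the real definitions are in dr_ste.h/dr_ste.c in
 * this series.  No caller depends on the ConnectX-5 (STE_v0) layout directly.
 */
struct mlx5dr_ste_ctx {
	/* subset of the per-device callbacks declared in dr_ste.h below */
	void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
	void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
};

/* dr_ste.c-style wrapper: callers pass the resolved context instead of
 * poking the HW-specific STE format themselves.
 */
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste_p, u64 miss_addr)
{
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}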
commit db471ed90f
@@ -83,5 +83,6 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
 					steering/dr_matcher.o steering/dr_rule.o \
 					steering/dr_icm_pool.o steering/dr_buddy.o \
 					steering/dr_ste.o steering/dr_send.o \
+					steering/dr_ste_v0.o \
 					steering/dr_cmd.o steering/dr_fw.o \
 					steering/dr_action.o steering/fs_dr.o
@ -218,158 +218,6 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
|
|||
},
|
||||
};
|
||||
|
||||
struct dr_action_modify_field_conv {
|
||||
u16 hw_field;
|
||||
u8 start;
|
||||
u8 end;
|
||||
u8 l3_type;
|
||||
u8 l4_type;
|
||||
};
|
||||
|
||||
static const struct dr_action_modify_field_conv dr_action_conv_arr[] = {
|
||||
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 16, .end = 47,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 0, .end = 15,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 32, .end = 47,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 16, .end = 47,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 0, .end = 15,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 0, .end = 5,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 48, .end = 56,
|
||||
.l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
|
||||
.l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
|
||||
.l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
|
||||
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
|
||||
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
|
||||
.l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
|
||||
.l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 32, .end = 63,
|
||||
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 0, .end = 31,
|
||||
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 32, .end = 63,
|
||||
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 0, .end = 31,
|
||||
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
|
||||
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
|
||||
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 32, .end = 63,
|
||||
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 0, .end = 31,
|
||||
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
|
||||
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
|
||||
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 0, .end = 31,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 32, .end = 63,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 32, .end = 63,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 0, .end = 31,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 32, .end = 63,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 0, .end = 31,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 32, .end = 63,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 0, .end = 31,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 32, .end = 63,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 0, .end = 31,
|
||||
},
|
||||
[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
|
||||
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 0, .end = 15,
|
||||
},
|
||||
};
|
||||
|
||||
#define MAX_VLANS 2
|
||||
struct dr_action_vlan_info {
|
||||
int count;
|
||||
u32 headers[MAX_VLANS];
|
||||
};
|
||||
|
||||
struct dr_action_apply_attr {
|
||||
u32 modify_index;
|
||||
u16 modify_actions;
|
||||
u32 decap_index;
|
||||
u16 decap_actions;
|
||||
u8 decap_with_vlan:1;
|
||||
u64 final_icm_addr;
|
||||
u32 flow_tag;
|
||||
u32 ctr_id;
|
||||
u16 gvmi;
|
||||
u16 hit_gvmi;
|
||||
u32 reformat_id;
|
||||
u32 reformat_size;
|
||||
struct dr_action_vlan_info vlans;
|
||||
};
|
||||
|
||||
static int
|
||||
dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type,
|
||||
enum mlx5dr_action_type *action_type)
|
||||
|
@ -394,141 +242,6 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void dr_actions_init_next_ste(u8 **last_ste,
|
||||
u32 *added_stes,
|
||||
enum mlx5dr_ste_entry_type entry_type,
|
||||
u16 gvmi)
|
||||
{
|
||||
(*added_stes)++;
|
||||
*last_ste += DR_STE_SIZE;
|
||||
mlx5dr_ste_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, entry_type, gvmi);
|
||||
}
|
||||
|
||||
static void dr_actions_apply_tx(struct mlx5dr_domain *dmn,
|
||||
u8 *action_type_set,
|
||||
u8 *last_ste,
|
||||
struct dr_action_apply_attr *attr,
|
||||
u32 *added_stes)
|
||||
{
|
||||
bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
|
||||
action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
|
||||
|
||||
/* We want to make sure the modify header comes before L2
|
||||
* encapsulation. The reason for that is that we support
|
||||
* modify headers for outer headers only
|
||||
*/
|
||||
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
|
||||
mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
|
||||
mlx5dr_ste_set_rewrite_actions(last_ste,
|
||||
attr->modify_actions,
|
||||
attr->modify_index);
|
||||
}
|
||||
|
||||
if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < attr->vlans.count; i++) {
|
||||
if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
|
||||
dr_actions_init_next_ste(&last_ste,
|
||||
added_stes,
|
||||
MLX5DR_STE_TYPE_TX,
|
||||
attr->gvmi);
|
||||
|
||||
mlx5dr_ste_set_tx_push_vlan(last_ste,
|
||||
attr->vlans.headers[i],
|
||||
encap);
|
||||
}
|
||||
}
|
||||
|
||||
if (encap) {
|
||||
/* Modify header and encapsulation require a different STEs.
|
||||
* Since modify header STE format doesn't support encapsulation
|
||||
* tunneling_action.
|
||||
*/
|
||||
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
|
||||
action_type_set[DR_ACTION_TYP_PUSH_VLAN])
|
||||
dr_actions_init_next_ste(&last_ste,
|
||||
added_stes,
|
||||
MLX5DR_STE_TYPE_TX,
|
||||
attr->gvmi);
|
||||
|
||||
mlx5dr_ste_set_tx_encap(last_ste,
|
||||
attr->reformat_id,
|
||||
attr->reformat_size,
|
||||
action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
|
||||
/* Whenever prio_tag_required enabled, we can be sure that the
|
||||
* previous table (ACL) already push vlan to our packet,
|
||||
* And due to HW limitation we need to set this bit, otherwise
|
||||
* push vlan + reformat will not work.
|
||||
*/
|
||||
if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
|
||||
mlx5dr_ste_set_go_back_bit(last_ste);
|
||||
}
|
||||
|
||||
if (action_type_set[DR_ACTION_TYP_CTR])
|
||||
mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
|
||||
}
|
||||
|
||||
static void dr_actions_apply_rx(u8 *action_type_set,
|
||||
u8 *last_ste,
|
||||
struct dr_action_apply_attr *attr,
|
||||
u32 *added_stes)
|
||||
{
|
||||
if (action_type_set[DR_ACTION_TYP_CTR])
|
||||
mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
|
||||
|
||||
if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
|
||||
mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
|
||||
mlx5dr_ste_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
|
||||
mlx5dr_ste_set_rewrite_actions(last_ste,
|
||||
attr->decap_actions,
|
||||
attr->decap_index);
|
||||
}
|
||||
|
||||
if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
|
||||
mlx5dr_ste_set_rx_decap(last_ste);
|
||||
|
||||
if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < attr->vlans.count; i++) {
|
||||
if (i ||
|
||||
action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
|
||||
action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
|
||||
dr_actions_init_next_ste(&last_ste,
|
||||
added_stes,
|
||||
MLX5DR_STE_TYPE_RX,
|
||||
attr->gvmi);
|
||||
|
||||
mlx5dr_ste_set_rx_pop_vlan(last_ste);
|
||||
}
|
||||
}
|
||||
|
||||
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
|
||||
if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
|
||||
dr_actions_init_next_ste(&last_ste,
|
||||
added_stes,
|
||||
MLX5DR_STE_TYPE_MODIFY_PKT,
|
||||
attr->gvmi);
|
||||
else
|
||||
mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
|
||||
|
||||
mlx5dr_ste_set_rewrite_actions(last_ste,
|
||||
attr->modify_actions,
|
||||
attr->modify_index);
|
||||
}
|
||||
|
||||
if (action_type_set[DR_ACTION_TYP_TAG]) {
|
||||
if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
|
||||
dr_actions_init_next_ste(&last_ste,
|
||||
added_stes,
|
||||
MLX5DR_STE_TYPE_RX,
|
||||
attr->gvmi);
|
||||
|
||||
mlx5dr_ste_rx_set_flow_tag(last_ste, attr->flow_tag);
|
||||
}
|
||||
}
|
||||
|
||||
/* Apply the actions on the rule STE array starting from the last_ste.
|
||||
* Actions might require more than one STE, new_num_stes will return
|
||||
* the new size of the STEs array, rule with actions.
|
||||
|
@@ -537,21 +250,20 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn,
 			     enum mlx5dr_ste_entry_type ste_type,
 			     u8 *action_type_set,
 			     u8 *last_ste,
-			     struct dr_action_apply_attr *attr,
+			     struct mlx5dr_ste_actions_attr *attr,
 			     u32 *new_num_stes)
 {
+	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
 	u32 added_stes = 0;
 
 	if (ste_type == MLX5DR_STE_TYPE_RX)
-		dr_actions_apply_rx(action_type_set, last_ste, attr, &added_stes);
+		mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set,
+					  last_ste, attr, &added_stes);
 	else
-		dr_actions_apply_tx(dmn, action_type_set, last_ste, attr, &added_stes);
+		mlx5dr_ste_set_actions_tx(ste_ctx, dmn, action_type_set,
+					  last_ste, attr, &added_stes);
 
 	last_ste += added_stes * DR_STE_SIZE;
 	*new_num_stes += added_stes;
-
-	mlx5dr_ste_set_hit_gvmi(last_ste, attr->hit_gvmi);
-	mlx5dr_ste_set_hit_addr(last_ste, attr->final_icm_addr, 1);
 }
|
||||
|
||||
static enum dr_action_domain
|
||||
|
@ -643,9 +355,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
|
|||
bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
|
||||
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
|
||||
u8 action_type_set[DR_ACTION_TYP_MAX] = {};
|
||||
struct mlx5dr_ste_actions_attr attr = {};
|
||||
struct mlx5dr_action *dest_action = NULL;
|
||||
u32 state = DR_ACTION_STATE_NO_ACTION;
|
||||
struct dr_action_apply_attr attr = {};
|
||||
enum dr_action_domain action_domain;
|
||||
bool recalc_cs_required = false;
|
||||
u8 *last_ste;
|
||||
|
@ -756,12 +468,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
|
|||
}
|
||||
break;
|
||||
case DR_ACTION_TYP_POP_VLAN:
|
||||
max_actions_type = MAX_VLANS;
|
||||
max_actions_type = MLX5DR_MAX_VLANS;
|
||||
attr.vlans.count++;
|
||||
break;
|
||||
case DR_ACTION_TYP_PUSH_VLAN:
|
||||
max_actions_type = MAX_VLANS;
|
||||
if (attr.vlans.count == MAX_VLANS)
|
||||
max_actions_type = MLX5DR_MAX_VLANS;
|
||||
if (attr.vlans.count == MLX5DR_MAX_VLANS)
|
||||
return -EINVAL;
|
||||
|
||||
attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr;
|
||||
|
@ -817,132 +529,6 @@ out_invalid_arg:
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
#define CVLAN_ETHERTYPE 0x8100
|
||||
#define SVLAN_ETHERTYPE 0x88a8
|
||||
#define HDR_LEN_L2_ONLY 14
|
||||
#define HDR_LEN_L2_VLAN 18
|
||||
#define REWRITE_HW_ACTION_NUM 6
|
||||
|
||||
static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,
|
||||
struct mlx5dr_action *action,
|
||||
void *data, size_t data_sz)
|
||||
{
|
||||
struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
|
||||
u64 ops[REWRITE_HW_ACTION_NUM] = {};
|
||||
u32 hdr_fld_4b;
|
||||
u16 hdr_fld_2b;
|
||||
u16 vlan_type;
|
||||
bool vlan;
|
||||
int i = 0;
|
||||
int ret;
|
||||
|
||||
vlan = (data_sz != HDR_LEN_L2_ONLY);
|
||||
|
||||
/* dmac_47_16 */
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_length, 0);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_left_shifter, 16);
|
||||
hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
inline_data, hdr_fld_4b);
|
||||
i++;
|
||||
|
||||
/* smac_47_16 */
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_length, 0);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_left_shifter, 16);
|
||||
hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
|
||||
MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
inline_data, hdr_fld_4b);
|
||||
i++;
|
||||
|
||||
/* dmac_15_0 */
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_length, 16);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_left_shifter, 0);
|
||||
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
inline_data, hdr_fld_2b);
|
||||
i++;
|
||||
|
||||
/* ethertype + (optional) vlan */
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_left_shifter, 32);
|
||||
if (!vlan) {
|
||||
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
|
||||
MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_2b);
|
||||
MLX5_SET(dr_action_hw_set, ops + i, destination_length, 16);
|
||||
} else {
|
||||
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
|
||||
vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
|
||||
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
|
||||
hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
|
||||
MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_4b);
|
||||
MLX5_SET(dr_action_hw_set, ops + i, destination_length, 18);
|
||||
}
|
||||
i++;
|
||||
|
||||
/* smac_15_0 */
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_length, 16);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_left_shifter, 0);
|
||||
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
inline_data, hdr_fld_2b);
|
||||
i++;
|
||||
|
||||
if (vlan) {
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
|
||||
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
inline_data, hdr_fld_2b);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_length, 16);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
|
||||
MLX5_SET(dr_action_hw_set, ops + i,
|
||||
destination_left_shifter, 0);
|
||||
i++;
|
||||
}
|
||||
|
||||
action->rewrite.data = (void *)ops;
|
||||
action->rewrite.num_of_actions = i;
|
||||
|
||||
ret = mlx5dr_send_postsend_action(dmn, action);
|
||||
if (ret) {
|
||||
mlx5dr_dbg(dmn, "Writing encapsulation action to ICM failed\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct mlx5dr_action *
|
||||
dr_action_create_generic(enum mlx5dr_action_type action_type)
|
||||
{
|
||||
|
@ -1217,21 +803,34 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
|
|||
}
|
||||
case DR_ACTION_TYP_TNL_L3_TO_L2:
|
||||
{
|
||||
/* Only Ethernet frame is supported, with VLAN (18) or without (14) */
|
||||
if (data_sz != HDR_LEN_L2_ONLY && data_sz != HDR_LEN_L2_VLAN)
|
||||
return -EINVAL;
|
||||
u8 hw_actions[ACTION_CACHE_LINE_SIZE] = {};
|
||||
int ret;
|
||||
|
||||
ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx,
|
||||
data, data_sz,
|
||||
hw_actions,
|
||||
ACTION_CACHE_LINE_SIZE,
|
||||
&action->rewrite.num_of_actions);
|
||||
if (ret) {
|
||||
mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
|
||||
DR_CHUNK_SIZE_8);
|
||||
if (!action->rewrite.chunk)
|
||||
if (!action->rewrite.chunk) {
|
||||
mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
action->rewrite.data = (void *)hw_actions;
|
||||
action->rewrite.index = (action->rewrite.chunk->icm_addr -
|
||||
dmn->info.caps.hdr_modify_icm_addr) /
|
||||
ACTION_CACHE_LINE_SIZE;
|
||||
|
||||
ret = dr_actions_l2_rewrite(dmn, action, data, data_sz);
|
||||
ret = mlx5dr_send_postsend_action(dmn, action);
|
||||
if (ret) {
|
||||
mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n");
|
||||
mlx5dr_icm_free_chunk(action->rewrite.chunk);
|
||||
return ret;
|
||||
}
|
||||
|
@ -1243,6 +842,9 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
|
|||
}
|
||||
}
|
||||
|
||||
#define CVLAN_ETHERTYPE 0x8100
|
||||
#define SVLAN_ETHERTYPE 0x88a8
|
||||
|
||||
struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void)
|
||||
{
|
||||
return dr_action_create_generic(DR_ACTION_TYP_POP_VLAN);
|
||||
|
@ -1315,31 +917,13 @@ dec_ref:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static const struct dr_action_modify_field_conv *
|
||||
dr_action_modify_get_hw_info(u16 sw_field)
|
||||
{
|
||||
const struct dr_action_modify_field_conv *hw_action_info;
|
||||
|
||||
if (sw_field >= ARRAY_SIZE(dr_action_conv_arr))
|
||||
goto not_found;
|
||||
|
||||
hw_action_info = &dr_action_conv_arr[sw_field];
|
||||
if (!hw_action_info->end && !hw_action_info->start)
|
||||
goto not_found;
|
||||
|
||||
return hw_action_info;
|
||||
|
||||
not_found:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int
|
||||
dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
|
||||
__be64 *sw_action,
|
||||
__be64 *hw_action,
|
||||
const struct dr_action_modify_field_conv **ret_hw_info)
|
||||
const struct mlx5dr_ste_action_modify_field **ret_hw_info)
|
||||
{
|
||||
const struct dr_action_modify_field_conv *hw_action_info;
|
||||
const struct mlx5dr_ste_action_modify_field *hw_action_info;
|
||||
u8 max_length;
|
||||
u16 sw_field;
|
||||
u32 data;
|
||||
|
@ -1349,7 +933,7 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
|
|||
data = MLX5_GET(set_action_in, sw_action, data);
|
||||
|
||||
/* Convert SW data to HW modify action format */
|
||||
hw_action_info = dr_action_modify_get_hw_info(sw_field);
|
||||
hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
|
||||
if (!hw_action_info) {
|
||||
mlx5dr_dbg(dmn, "Modify add action invalid field given\n");
|
||||
return -EINVAL;
|
||||
|
@ -1357,20 +941,12 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
|
|||
|
||||
max_length = hw_action_info->end - hw_action_info->start + 1;
|
||||
|
||||
MLX5_SET(dr_action_hw_set, hw_action,
|
||||
opcode, MLX5DR_ACTION_MDFY_HW_OP_ADD);
|
||||
|
||||
MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
|
||||
hw_action_info->hw_field);
|
||||
|
||||
MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
|
||||
hw_action_info->start);
|
||||
|
||||
/* PRM defines that length zero specific length of 32bits */
|
||||
MLX5_SET(dr_action_hw_set, hw_action, destination_length,
|
||||
max_length == 32 ? 0 : max_length);
|
||||
|
||||
MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
|
||||
mlx5dr_ste_set_action_add(dmn->ste_ctx,
|
||||
hw_action,
|
||||
hw_action_info->hw_field,
|
||||
hw_action_info->start,
|
||||
max_length,
|
||||
data);
|
||||
|
||||
*ret_hw_info = hw_action_info;
|
||||
|
||||
|
@ -1381,9 +957,9 @@ static int
|
|||
dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
|
||||
__be64 *sw_action,
|
||||
__be64 *hw_action,
|
||||
const struct dr_action_modify_field_conv **ret_hw_info)
|
||||
const struct mlx5dr_ste_action_modify_field **ret_hw_info)
|
||||
{
|
||||
const struct dr_action_modify_field_conv *hw_action_info;
|
||||
const struct mlx5dr_ste_action_modify_field *hw_action_info;
|
||||
u8 offset, length, max_length;
|
||||
u16 sw_field;
|
||||
u32 data;
|
||||
|
@ -1395,7 +971,7 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
|
|||
data = MLX5_GET(set_action_in, sw_action, data);
|
||||
|
||||
/* Convert SW data to HW modify action format */
|
||||
hw_action_info = dr_action_modify_get_hw_info(sw_field);
|
||||
hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
|
||||
if (!hw_action_info) {
|
||||
mlx5dr_dbg(dmn, "Modify set action invalid field given\n");
|
||||
return -EINVAL;
|
||||
|
@ -1411,19 +987,12 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
MLX5_SET(dr_action_hw_set, hw_action,
|
||||
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
|
||||
|
||||
MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
|
||||
hw_action_info->hw_field);
|
||||
|
||||
MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
|
||||
hw_action_info->start + offset);
|
||||
|
||||
MLX5_SET(dr_action_hw_set, hw_action, destination_length,
|
||||
length == 32 ? 0 : length);
|
||||
|
||||
MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
|
||||
mlx5dr_ste_set_action_set(dmn->ste_ctx,
|
||||
hw_action,
|
||||
hw_action_info->hw_field,
|
||||
hw_action_info->start + offset,
|
||||
length,
|
||||
data);
|
||||
|
||||
*ret_hw_info = hw_action_info;
|
||||
|
||||
|
@ -1434,12 +1003,12 @@ static int
|
|||
dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
|
||||
__be64 *sw_action,
|
||||
__be64 *hw_action,
|
||||
const struct dr_action_modify_field_conv **ret_dst_hw_info,
|
||||
const struct dr_action_modify_field_conv **ret_src_hw_info)
|
||||
const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
|
||||
const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
|
||||
{
|
||||
u8 src_offset, dst_offset, src_max_length, dst_max_length, length;
|
||||
const struct dr_action_modify_field_conv *hw_dst_action_info;
|
||||
const struct dr_action_modify_field_conv *hw_src_action_info;
|
||||
const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
|
||||
const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
|
||||
u16 src_field, dst_field;
|
||||
|
||||
/* Get SW modify action data */
|
||||
|
@ -1450,8 +1019,8 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
|
|||
length = MLX5_GET(copy_action_in, sw_action, length);
|
||||
|
||||
/* Convert SW data to HW modify action format */
|
||||
hw_src_action_info = dr_action_modify_get_hw_info(src_field);
|
||||
hw_dst_action_info = dr_action_modify_get_hw_info(dst_field);
|
||||
hw_src_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, src_field);
|
||||
hw_dst_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, dst_field);
|
||||
if (!hw_src_action_info || !hw_dst_action_info) {
|
||||
mlx5dr_dbg(dmn, "Modify copy action invalid field given\n");
|
||||
return -EINVAL;
|
||||
|
@ -1471,23 +1040,13 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
MLX5_SET(dr_action_hw_copy, hw_action,
|
||||
opcode, MLX5DR_ACTION_MDFY_HW_OP_COPY);
|
||||
|
||||
MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code,
|
||||
hw_dst_action_info->hw_field);
|
||||
|
||||
MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter,
|
||||
hw_dst_action_info->start + dst_offset);
|
||||
|
||||
MLX5_SET(dr_action_hw_copy, hw_action, destination_length,
|
||||
length == 32 ? 0 : length);
|
||||
|
||||
MLX5_SET(dr_action_hw_copy, hw_action, source_field_code,
|
||||
hw_src_action_info->hw_field);
|
||||
|
||||
MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter,
|
||||
hw_src_action_info->start + dst_offset);
|
||||
mlx5dr_ste_set_action_copy(dmn->ste_ctx,
|
||||
hw_action,
|
||||
hw_dst_action_info->hw_field,
|
||||
hw_dst_action_info->start + dst_offset,
|
||||
length,
|
||||
hw_src_action_info->hw_field,
|
||||
hw_src_action_info->start + src_offset);
|
||||
|
||||
*ret_dst_hw_info = hw_dst_action_info;
|
||||
*ret_src_hw_info = hw_src_action_info;
|
||||
|
@ -1499,8 +1058,8 @@ static int
|
|||
dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
|
||||
__be64 *sw_action,
|
||||
__be64 *hw_action,
|
||||
const struct dr_action_modify_field_conv **ret_dst_hw_info,
|
||||
const struct dr_action_modify_field_conv **ret_src_hw_info)
|
||||
const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
|
||||
const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
|
||||
{
|
||||
u8 action;
|
||||
int ret;
|
||||
|
@ -1677,15 +1236,15 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
|
|||
u32 *num_hw_actions,
|
||||
bool *modify_ttl)
|
||||
{
|
||||
const struct dr_action_modify_field_conv *hw_dst_action_info;
|
||||
const struct dr_action_modify_field_conv *hw_src_action_info;
|
||||
u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED;
|
||||
u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE;
|
||||
u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE;
|
||||
const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
|
||||
const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
|
||||
struct mlx5dr_domain *dmn = action->rewrite.dmn;
|
||||
int ret, i, hw_idx = 0;
|
||||
__be64 *sw_action;
|
||||
__be64 hw_action;
|
||||
u16 hw_field = 0;
|
||||
u32 l3_type = 0;
|
||||
u32 l4_type = 0;
|
||||
|
||||
*modify_ttl = false;
|
||||
|
||||
|
|
|
@@ -57,6 +57,12 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
 {
 	int ret;
 
+	dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
+	if (!dmn->ste_ctx) {
+		mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
+		return -EOPNOTSUPP;
+	}
+
 	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
 	if (ret) {
 		mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
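The lookup behind mlx5dr_ste_get_ctx() lives in dr_ste.c, whose full diff is suppressed further down this page. As a hedged sketch, it presumably maps the reported steering-format capability to the single context this series provides (STE_v0 for ConnectX-5); the MLX5_STEERING_FORMAT_CONNECTX_5 value below is an assumption about that capability encoding, not quoted from the patch.

/* Sketch of the version dispatch (actual code not shown in this excerpt). */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	if (version == MLX5_STEERING_FORMAT_CONNECTX_5)	/* assumed cap value */
		return &ste_ctx_v0;

	return NULL;	/* unknown format: dr_domain_init_resources() bails out */
}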
@ -221,6 +221,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
|
|||
{
|
||||
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
|
||||
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
|
||||
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
|
||||
struct mlx5dr_match_param mask = {};
|
||||
struct mlx5dr_ste_build *sb;
|
||||
bool inner, rx;
|
||||
|
@ -259,80 +260,89 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
|
|||
inner = false;
|
||||
|
||||
if (dr_mask_is_wqe_metadata_set(&mask.misc2))
|
||||
mlx5dr_ste_build_general_purpose(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_general_purpose(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
|
||||
mlx5dr_ste_build_register_0(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_register_0(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
|
||||
mlx5dr_ste_build_register_1(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_register_1(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
|
||||
(dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
|
||||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
|
||||
mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
|
||||
dmn, inner, rx);
|
||||
mlx5dr_ste_build_src_gvmi_qpn(ste_ctx, &sb[idx++],
|
||||
&mask, dmn, inner, rx);
|
||||
}
|
||||
|
||||
if (dr_mask_is_smac_set(&mask.outer) &&
|
||||
dr_mask_is_dmac_set(&mask.outer)) {
|
||||
mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask,
|
||||
inner, rx);
|
||||
mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
}
|
||||
|
||||
if (dr_mask_is_smac_set(&mask.outer))
|
||||
mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
|
||||
mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (outer_ipv == DR_RULE_IPV6) {
|
||||
if (dr_mask_is_dst_addr_set(&mask.outer))
|
||||
mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
|
||||
inner, rx);
|
||||
mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (dr_mask_is_src_addr_set(&mask.outer))
|
||||
mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
|
||||
inner, rx);
|
||||
mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
|
||||
mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
|
||||
inner, rx);
|
||||
mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
} else {
|
||||
if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
|
||||
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
|
||||
inner, rx);
|
||||
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (dr_mask_is_ttl_set(&mask.outer))
|
||||
mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
|
||||
inner, rx);
|
||||
mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
}
|
||||
|
||||
if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
|
||||
mlx5dr_ste_build_tnl_vxlan_gpe(&sb[idx++], &mask,
|
||||
inner, rx);
|
||||
mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
else if (dr_mask_is_tnl_geneve(&mask, dmn))
|
||||
mlx5dr_ste_build_tnl_geneve(&sb[idx++], &mask,
|
||||
inner, rx);
|
||||
mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
|
||||
mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
|
||||
mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
|
||||
mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (dr_mask_is_icmp(&mask, dmn)) {
|
||||
ret = mlx5dr_ste_build_icmp(&sb[idx++],
|
||||
ret = mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
|
||||
&mask, &dmn->info.caps,
|
||||
inner, rx);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
if (dr_mask_is_tnl_gre_set(&mask.misc))
|
||||
mlx5dr_ste_build_tnl_gre(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
}
|
||||
|
||||
/* Inner */
|
||||
|
@ -343,50 +353,56 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
|
|||
inner = true;
|
||||
|
||||
if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
|
||||
mlx5dr_ste_build_eth_l2_tnl(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_eth_l2_tnl(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (dr_mask_is_smac_set(&mask.inner) &&
|
||||
dr_mask_is_dmac_set(&mask.inner)) {
|
||||
mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++],
|
||||
mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
}
|
||||
|
||||
if (dr_mask_is_smac_set(&mask.inner))
|
||||
mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
|
||||
mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (inner_ipv == DR_RULE_IPV6) {
|
||||
if (dr_mask_is_dst_addr_set(&mask.inner))
|
||||
mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
|
||||
inner, rx);
|
||||
mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (dr_mask_is_src_addr_set(&mask.inner))
|
||||
mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
|
||||
inner, rx);
|
||||
mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
|
||||
mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
|
||||
inner, rx);
|
||||
mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
} else {
|
||||
if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
|
||||
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
|
||||
inner, rx);
|
||||
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (dr_mask_is_ttl_set(&mask.inner))
|
||||
mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
|
||||
inner, rx);
|
||||
mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
}
|
||||
|
||||
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
|
||||
mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
|
||||
mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
|
||||
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
|
||||
mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
|
||||
mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
|
||||
&mask, inner, rx);
|
||||
}
|
||||
/* Empty matcher, takes all */
|
||||
if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
|
||||
|
|
|
@ -10,7 +10,8 @@ struct mlx5dr_rule_action_member {
|
|||
struct list_head list;
|
||||
};
|
||||
|
||||
static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
|
||||
static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
|
||||
struct mlx5dr_ste *new_last_ste,
|
||||
struct list_head *miss_list,
|
||||
struct list_head *send_list)
|
||||
{
|
||||
|
@ -25,7 +26,7 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
|
|||
if (!ste_info_last)
|
||||
return -ENOMEM;
|
||||
|
||||
mlx5dr_ste_set_miss_addr(last_ste->hw_ste,
|
||||
mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
|
||||
mlx5dr_ste_get_icm_addr(new_last_ste));
|
||||
list_add_tail(&new_last_ste->miss_list_node, miss_list);
|
||||
|
||||
|
@ -42,6 +43,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
|
|||
u8 *hw_ste)
|
||||
{
|
||||
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
|
||||
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
|
||||
struct mlx5dr_ste_htbl *new_htbl;
|
||||
struct mlx5dr_ste *ste;
|
||||
|
||||
|
@ -57,7 +59,8 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
|
|||
|
||||
/* One and only entry, never grows */
|
||||
ste = new_htbl->ste_arr;
|
||||
mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
|
||||
mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
|
||||
nic_matcher->e_anchor->chunk->icm_addr);
|
||||
mlx5dr_htbl_get(new_htbl);
|
||||
|
||||
return ste;
|
||||
|
@ -169,6 +172,7 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
|
|||
struct mlx5dr_ste *col_ste,
|
||||
u8 *hw_ste)
|
||||
{
|
||||
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
|
||||
struct mlx5dr_ste *new_ste;
|
||||
int ret;
|
||||
|
||||
|
@ -180,11 +184,11 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
|
|||
new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
|
||||
|
||||
/* Update the previous from the list */
|
||||
ret = dr_rule_append_to_miss_list(new_ste,
|
||||
ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
|
||||
mlx5dr_ste_get_miss_list(col_ste),
|
||||
update_list);
|
||||
if (ret) {
|
||||
mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
|
||||
mlx5dr_dbg(dmn, "Failed update dup entry\n");
|
||||
goto err_exit;
|
||||
}
|
||||
|
||||
|
@ -224,6 +228,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
|
|||
struct mlx5dr_ste_htbl *new_htbl,
|
||||
struct list_head *update_list)
|
||||
{
|
||||
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
|
||||
struct mlx5dr_ste_send_info *ste_info;
|
||||
bool use_update_list = false;
|
||||
u8 hw_ste[DR_STE_SIZE] = {};
|
||||
|
@ -237,7 +242,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
|
|||
|
||||
/* Copy STE control and tag */
|
||||
memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
|
||||
mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
|
||||
mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
|
||||
nic_matcher->e_anchor->chunk->icm_addr);
|
||||
|
||||
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
|
||||
new_ste = &new_htbl->ste_arr[new_idx];
|
||||
|
@ -253,7 +259,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
|
|||
new_ste,
|
||||
hw_ste);
|
||||
if (!new_ste) {
|
||||
mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
|
||||
mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
|
||||
new_idx);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -391,7 +397,8 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
|
|||
/* Write new table to HW */
|
||||
info.type = CONNECT_MISS;
|
||||
info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
|
||||
mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
|
||||
mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
|
||||
dmn->info.caps.gvmi,
|
||||
nic_dmn,
|
||||
new_htbl,
|
||||
formatted_ste,
|
||||
|
@ -436,13 +443,15 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
|
|||
/* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
|
||||
* (48B len) which works only on first 32B
|
||||
*/
|
||||
mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste,
|
||||
mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
|
||||
prev_htbl->ste_arr[0].hw_ste,
|
||||
new_htbl->chunk->icm_addr,
|
||||
new_htbl->chunk->num_of_entries);
|
||||
|
||||
ste_to_update = &prev_htbl->ste_arr[0];
|
||||
} else {
|
||||
mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste,
|
||||
mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
|
||||
cur_htbl->pointing_ste->hw_ste,
|
||||
new_htbl);
|
||||
ste_to_update = cur_htbl->pointing_ste;
|
||||
}
|
||||
|
@ -496,6 +505,8 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
|
|||
struct list_head *miss_list,
|
||||
struct list_head *send_list)
|
||||
{
|
||||
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
|
||||
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
|
||||
struct mlx5dr_ste_send_info *ste_info;
|
||||
struct mlx5dr_ste *new_ste;
|
||||
|
||||
|
@ -507,8 +518,9 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
|
|||
if (!new_ste)
|
||||
goto free_send_info;
|
||||
|
||||
if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) {
|
||||
mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
|
||||
if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
|
||||
miss_list, send_list)) {
|
||||
mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
|
||||
goto err_exit;
|
||||
}
|
||||
|
||||
|
@ -659,6 +671,7 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
|
|||
struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
|
||||
u8 num_of_builders = nic_matcher->num_of_builders;
|
||||
struct mlx5dr_matcher *matcher = rule->matcher;
|
||||
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
|
||||
u8 *curr_hw_ste, *prev_hw_ste;
|
||||
struct mlx5dr_ste *action_ste;
|
||||
int i, k, ret;
|
||||
|
@ -692,10 +705,12 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
|
|||
goto err_exit;
|
||||
|
||||
/* Point current ste to the new action */
|
||||
mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl);
|
||||
mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
|
||||
prev_hw_ste,
|
||||
action_ste->htbl);
|
||||
ret = dr_rule_add_member(nic_rule, action_ste);
|
||||
if (ret) {
|
||||
mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
|
||||
mlx5dr_dbg(dmn, "Failed adding rule member\n");
|
||||
goto free_ste_info;
|
||||
}
|
||||
mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
|
||||
|
@ -722,6 +737,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
|
|||
struct list_head *miss_list,
|
||||
struct list_head *send_list)
|
||||
{
|
||||
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
|
||||
struct mlx5dr_ste_send_info *ste_info;
|
||||
|
||||
/* Take ref on table, only on first time this ste is used */
|
||||
|
@ -730,7 +746,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
|
|||
/* new entry -> new branch */
|
||||
list_add_tail(&ste->miss_list_node, miss_list);
|
||||
|
||||
mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
|
||||
mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
|
||||
nic_matcher->e_anchor->chunk->icm_addr);
|
||||
|
||||
ste->ste_chain_location = ste_location;
|
||||
|
||||
|
@ -743,7 +760,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
|
|||
ste,
|
||||
hw_ste,
|
||||
DR_CHUNK_SIZE_1)) {
|
||||
mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
|
||||
mlx5dr_dbg(dmn, "Failed allocating table\n");
|
||||
goto clean_ste_info;
|
||||
}
|
||||
|
||||
|
|
File diff suppressed because it is too large
@ -0,0 +1,167 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
|
||||
/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
|
||||
|
||||
#ifndef _DR_STE_
|
||||
#define _DR_STE_
|
||||
|
||||
#include "dr_types.h"
|
||||
|
||||
#define STE_IPV4 0x1
|
||||
#define STE_IPV6 0x2
|
||||
#define STE_TCP 0x1
|
||||
#define STE_UDP 0x2
|
||||
#define STE_SPI 0x3
|
||||
#define IP_VERSION_IPV4 0x4
|
||||
#define IP_VERSION_IPV6 0x6
|
||||
#define STE_SVLAN 0x1
|
||||
#define STE_CVLAN 0x2
|
||||
#define HDR_LEN_L2_MACS 0xC
|
||||
#define HDR_LEN_L2_VLAN 0x4
|
||||
#define HDR_LEN_L2_ETHER 0x2
|
||||
#define HDR_LEN_L2 (HDR_LEN_L2_MACS + HDR_LEN_L2_ETHER)
|
||||
#define HDR_LEN_L2_W_VLAN (HDR_LEN_L2 + HDR_LEN_L2_VLAN)
|
||||
|
||||
/* Set to STE a specific value using DR_STE_SET */
|
||||
#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
|
||||
if ((spec)->s_fname) { \
|
||||
MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
|
||||
(spec)->s_fname = 0; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/* Set to STE spec->s_fname to tag->t_fname set spec->s_fname as used */
|
||||
#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
|
||||
DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
|
||||
|
||||
/* Set to STE -1 to tag->t_fname and set spec->s_fname as used */
|
||||
#define DR_STE_SET_ONES(lookup_type, tag, t_fname, spec, s_fname) \
|
||||
DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, -1)
|
||||
|
||||
#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
|
||||
MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
|
||||
MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
|
||||
MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
|
||||
MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
|
||||
MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
|
||||
MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
|
||||
MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
|
||||
MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
|
||||
MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
|
||||
} while (0)
|
||||
|
||||
#define DR_STE_SET_MPLS(lookup_type, mask, in_out, tag) do { \
|
||||
struct mlx5dr_match_misc2 *_mask = mask; \
|
||||
u8 *_tag = tag; \
|
||||
DR_STE_SET_TAG(lookup_type, _tag, mpls0_label, _mask, \
|
||||
in_out##_first_mpls_label);\
|
||||
DR_STE_SET_TAG(lookup_type, _tag, mpls0_s_bos, _mask, \
|
||||
in_out##_first_mpls_s_bos); \
|
||||
DR_STE_SET_TAG(lookup_type, _tag, mpls0_exp, _mask, \
|
||||
in_out##_first_mpls_exp); \
|
||||
DR_STE_SET_TAG(lookup_type, _tag, mpls0_ttl, _mask, \
|
||||
in_out##_first_mpls_ttl); \
|
||||
} while (0)
|
||||
|
||||
#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
|
||||
(_misc)->outer_first_mpls_over_gre_label || \
|
||||
(_misc)->outer_first_mpls_over_gre_exp || \
|
||||
(_misc)->outer_first_mpls_over_gre_s_bos || \
|
||||
(_misc)->outer_first_mpls_over_gre_ttl)
|
||||
|
||||
#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
|
||||
(_misc)->outer_first_mpls_over_udp_label || \
|
||||
(_misc)->outer_first_mpls_over_udp_exp || \
|
||||
(_misc)->outer_first_mpls_over_udp_s_bos || \
|
||||
(_misc)->outer_first_mpls_over_udp_ttl)
|
||||
|
||||
enum dr_ste_action_modify_type_l3 {
|
||||
DR_STE_ACTION_MDFY_TYPE_L3_NONE = 0x0,
|
||||
DR_STE_ACTION_MDFY_TYPE_L3_IPV4 = 0x1,
|
||||
DR_STE_ACTION_MDFY_TYPE_L3_IPV6 = 0x2,
|
||||
};
|
||||
|
||||
enum dr_ste_action_modify_type_l4 {
|
||||
DR_STE_ACTION_MDFY_TYPE_L4_NONE = 0x0,
|
||||
DR_STE_ACTION_MDFY_TYPE_L4_TCP = 0x1,
|
||||
DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2,
|
||||
};
|
||||
|
||||
u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask);
|
||||
|
||||
#define DR_STE_CTX_BUILDER(fname) \
|
||||
((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \
|
||||
struct mlx5dr_match_param *mask))
|
||||
|
||||
struct mlx5dr_ste_ctx {
|
||||
/* Builders */
|
||||
void DR_STE_CTX_BUILDER(eth_l2_src_dst);
|
||||
void DR_STE_CTX_BUILDER(eth_l3_ipv6_src);
|
||||
void DR_STE_CTX_BUILDER(eth_l3_ipv6_dst);
|
||||
void DR_STE_CTX_BUILDER(eth_l3_ipv4_5_tuple);
|
||||
void DR_STE_CTX_BUILDER(eth_l2_src);
|
||||
void DR_STE_CTX_BUILDER(eth_l2_dst);
|
||||
void DR_STE_CTX_BUILDER(eth_l2_tnl);
|
||||
void DR_STE_CTX_BUILDER(eth_l3_ipv4_misc);
|
||||
void DR_STE_CTX_BUILDER(eth_ipv6_l3_l4);
|
||||
void DR_STE_CTX_BUILDER(mpls);
|
||||
void DR_STE_CTX_BUILDER(tnl_gre);
|
||||
void DR_STE_CTX_BUILDER(tnl_mpls);
|
||||
int DR_STE_CTX_BUILDER(icmp);
|
||||
void DR_STE_CTX_BUILDER(general_purpose);
|
||||
void DR_STE_CTX_BUILDER(eth_l4_misc);
|
||||
void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
|
||||
void DR_STE_CTX_BUILDER(tnl_geneve);
|
||||
void DR_STE_CTX_BUILDER(register_0);
|
||||
void DR_STE_CTX_BUILDER(register_1);
|
||||
void DR_STE_CTX_BUILDER(src_gvmi_qpn);
|
||||
|
||||
/* Getters and Setters */
|
||||
void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
|
||||
u8 entry_type, u16 gvmi);
|
||||
void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
|
||||
u16 (*get_next_lu_type)(u8 *hw_ste_p);
|
||||
void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
|
||||
u64 (*get_miss_addr)(u8 *hw_ste_p);
|
||||
void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
|
||||
void (*set_byte_mask)(u8 *hw_ste_p, u16 byte_mask);
|
||||
u16 (*get_byte_mask)(u8 *hw_ste_p);
|
||||
|
||||
/* Actions */
|
||||
void (*set_actions_rx)(struct mlx5dr_domain *dmn,
|
||||
u8 *action_type_set,
|
||||
u8 *hw_ste_arr,
|
||||
struct mlx5dr_ste_actions_attr *attr,
|
||||
u32 *added_stes);
|
||||
void (*set_actions_tx)(struct mlx5dr_domain *dmn,
|
||||
u8 *action_type_set,
|
||||
u8 *hw_ste_arr,
|
||||
struct mlx5dr_ste_actions_attr *attr,
|
||||
u32 *added_stes);
|
||||
u32 modify_field_arr_sz;
|
||||
const struct mlx5dr_ste_action_modify_field *modify_field_arr;
|
||||
void (*set_action_set)(u8 *hw_action,
|
||||
u8 hw_field,
|
||||
u8 shifter,
|
||||
u8 length,
|
||||
u32 data);
|
||||
void (*set_action_add)(u8 *hw_action,
|
||||
u8 hw_field,
|
||||
u8 shifter,
|
||||
u8 length,
|
||||
u32 data);
|
||||
void (*set_action_copy)(u8 *hw_action,
|
||||
u8 dst_hw_field,
|
||||
u8 dst_shifter,
|
||||
u8 dst_len,
|
||||
u8 src_hw_field,
|
||||
u8 src_shifter);
|
||||
int (*set_action_decap_l3_list)(void *data,
|
||||
u32 data_sz,
|
||||
u8 *hw_action,
|
||||
u32 hw_action_sz,
|
||||
u16 *used_hw_action_num);
|
||||
};
|
||||
|
||||
extern struct mlx5dr_ste_ctx ste_ctx_v0;
|
||||
|
||||
#endif /* _DR_STE_ */
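The ste_ctx_v0 table declared above is filled in by steering/dr_ste_v0.o, whose diff is suppressed below. The following is a sketch of how a device-specific file is expected to populate the context: the field names come from the struct above (the builder fields expand to build_*_init via DR_STE_CTX_BUILDER), while the dr_ste_v0_* handler names are hypothetical placeholders, and only a few representative fields are shown.

/* Hypothetical excerpt -- illustrates the wiring, not the actual file. */
struct mlx5dr_ste_ctx ste_ctx_v0 = {
	/* Builders */
	.build_eth_l2_src_dst_init	= &dr_ste_v0_build_eth_l2_src_dst_init,
	.build_eth_l3_ipv4_5_tuple_init	= &dr_ste_v0_build_eth_l3_ipv4_5_tuple_init,
	/* Getters and Setters */
	.ste_init			= &dr_ste_v0_init,
	.set_miss_addr			= &dr_ste_v0_set_miss_addr,
	.set_hit_addr			= &dr_ste_v0_set_hit_addr,
	/* Actions */
	.set_actions_rx			= &dr_ste_v0_set_actions_rx,
	.set_actions_tx			= &dr_ste_v0_set_actions_tx,
	.set_action_set			= &dr_ste_v0_set_action_set,
	.set_action_add			= &dr_ste_v0_set_action_add,
	.set_action_copy		= &dr_ste_v0_set_action_copy,
	.set_action_decap_l3_list	= &dr_ste_v0_set_action_decap_l3_list,
};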
File diff suppressed because it is too large
@ -120,6 +120,7 @@ struct mlx5dr_ste_htbl;
|
|||
struct mlx5dr_match_param;
|
||||
struct mlx5dr_cmd_caps;
|
||||
struct mlx5dr_matcher_rx_tx;
|
||||
struct mlx5dr_ste_ctx;
|
||||
|
||||
struct mlx5dr_ste {
|
||||
u8 *hw_ste;
|
||||
|
@ -154,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl {
|
|||
};
|
||||
|
||||
struct mlx5dr_ste_htbl {
|
||||
u8 lu_type;
|
||||
u16 lu_type;
|
||||
u16 byte_mask;
|
||||
u32 refcount;
|
||||
struct mlx5dr_icm_chunk *chunk;
|
||||
|
@ -190,7 +191,7 @@ struct mlx5dr_ste_build {
|
|||
u8 vhca_id_valid:1;
|
||||
struct mlx5dr_domain *dmn;
|
||||
struct mlx5dr_cmd_caps *caps;
|
||||
u8 lu_type;
|
||||
u16 lu_type;
|
||||
u16 byte_mask;
|
||||
u8 bit_mask[DR_STE_SIZE_MASK];
|
||||
int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
|
||||
|
@ -201,7 +202,7 @@ struct mlx5dr_ste_build {
|
|||
struct mlx5dr_ste_htbl *
|
||||
mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
|
||||
enum mlx5dr_icm_chunk_size chunk_size,
|
||||
u8 lu_type, u16 byte_mask);
|
||||
u16 lu_type, u16 byte_mask);
|
||||
|
||||
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
|
||||
|
||||
|
@ -219,35 +220,84 @@ static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)

/* STE utils */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi);
void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
        struct mlx5dr_ste_htbl *next_htbl);
void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr);
u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste);
void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
        u8 *hw_ste, u64 miss_addr);
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
        u8 *hw_ste, u64 icm_addr, u32 ht_size);
void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
        u8 *hw_ste,
        struct mlx5dr_ste_htbl *next_htbl);
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
        u8 ste_location);
void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id);
void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id,
        int size, bool encap_l3);
void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p);
void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan);
void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p);
void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid,
        bool go_back);
void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type);
u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p);
void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
        u32 re_write_index);
void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p);
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);

#define MLX5DR_MAX_VLANS 2

struct mlx5dr_ste_actions_attr {
        u32 modify_index;
        u16 modify_actions;
        u32 decap_index;
        u16 decap_actions;
        u8 decap_with_vlan:1;
        u64 final_icm_addr;
        u32 flow_tag;
        u32 ctr_id;
        u16 gvmi;
        u16 hit_gvmi;
        u32 reformat_id;
        u32 reformat_size;
        struct {
                int count;
                u32 headers[MLX5DR_MAX_VLANS];
        } vlans;
};

void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_domain *dmn,
        u8 *action_type_set,
        u8 *last_ste,
        struct mlx5dr_ste_actions_attr *attr,
        u32 *added_stes);
void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_domain *dmn,
        u8 *action_type_set,
        u8 *last_ste,
        struct mlx5dr_ste_actions_attr *attr,
        u32 *added_stes);

void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
        __be64 *hw_action,
        u8 hw_field,
        u8 shifter,
        u8 length,
        u32 data);
void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
        __be64 *hw_action,
        u8 hw_field,
        u8 shifter,
        u8 length,
        u32 data);
void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
        __be64 *hw_action,
        u8 dst_hw_field,
        u8 dst_shifter,
        u8 dst_len,
        u8 src_hw_field,
        u8 src_shifter);
int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
        void *data,
        u32 data_sz,
        u8 *hw_action,
        u32 hw_action_sz,
        u16 *used_hw_action_num);

const struct mlx5dr_ste_action_modify_field *
mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field);

struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version);
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
        struct mlx5dr_matcher *matcher,
        struct mlx5dr_matcher_rx_tx *nic_matcher);
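The STE setters above now take a struct mlx5dr_ste_ctx *, and mlx5dr_ste_get_ctx(u8 version) returns a per-version context, so the shared code no longer writes a single hardware layout directly. A minimal sketch of how that indirection could be wired up; the callback fields shown here are illustrative assumptions, not the exact upstream definition:

/* Hedged sketch of a per-device STE callback table; field names are
 * assumptions for illustration only.
 */
struct mlx5dr_ste_ctx {
        void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
        void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
};

/* Common DR code stays device-agnostic by forwarding through the context. */
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
                              u8 *hw_ste, u64 miss_addr)
{
        ste_ctx->set_miss_addr(hw_ste, miss_addr);
}

Each supported device format would then provide its own instance of such a table, and callers only ever see the wrapper.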
@ -271,8 +321,6 @@ static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
        return !ste->refcount;
}

void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
        struct mlx5dr_ste_htbl *next_htbl);
bool mlx5dr_ste_equal_tag(void *src, void *dst);
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
        struct mlx5dr_matcher_rx_tx *nic_matcher,
@ -289,65 +337,85 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
        struct mlx5dr_matcher_rx_tx *nic_matcher,
        struct mlx5dr_match_param *value,
        u8 *ste_arr);
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *builder,
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *builder,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        struct mlx5dr_cmd_caps *caps,
        bool inner, bool rx);
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        bool inner, bool rx);
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
        struct mlx5dr_ste_build *sb,
        struct mlx5dr_match_param *mask,
        struct mlx5dr_domain *dmn,
        bool inner, bool rx);
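Every builder prototype above gains the STE context as its first argument, so matcher code that previously passed only the builder, the mask and the inner/rx flags now has to thread the context through as well. A hedged sketch of the call-site change, with ste_ctx, sb, idx, mask, inner and rx standing in for the matcher's local state:

/* before: mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], mask, inner, rx); */
mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++], mask, inner, rx);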
@ -574,10 +642,10 @@ struct mlx5dr_match_misc3 {
        u32 outer_vxlan_gpe_next_protocol:8;
        u32 icmpv4_header_data;
        u32 icmpv6_header_data;
        u32 icmpv6_code:8;
        u32 icmpv6_type:8;
        u32 icmpv4_code:8;
        u32 icmpv4_type:8;
        u8 icmpv6_code;
        u8 icmpv6_type;
        u8 icmpv4_code;
        u8 icmpv4_type;
        u8 reserved_auto3[0x1c];
};
@ -671,6 +739,7 @@ struct mlx5dr_domain {
        struct mlx5dr_send_ring *send_ring;
        struct mlx5dr_domain_info info;
        struct mlx5dr_domain_cache cache;
        struct mlx5dr_ste_ctx *ste_ctx;
};

struct mlx5dr_table_rx_tx {
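struct mlx5dr_domain now carries the resolved STE context, which pairs with the mlx5dr_ste_get_ctx(u8 version) declaration earlier in the header. A minimal sketch of how domain setup might populate it, assuming the steering format version comes from the queried device capabilities (the sw_format_ver field name and the helper below are assumptions, not the exact upstream code):

/* Hedged sketch: resolve the device-specific STE layer once per domain. */
static int dr_domain_init_ste_ctx(struct mlx5dr_domain *dmn)
{
        dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
        if (!dmn->ste_ctx)
                return -EOPNOTSUPP;

        return 0;
}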
@ -725,6 +794,14 @@ struct mlx5dr_rule_member {
        struct list_head use_ste_list;
};

struct mlx5dr_ste_action_modify_field {
        u16 hw_field;
        u8 start;
        u8 end;
        u8 l3_type;
        u8 l4_type;
};

struct mlx5dr_action {
        enum mlx5dr_action_type action_type;
        refcount_t refcount;
@ -1000,7 +1077,8 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
        struct mlx5dr_ste_htbl *htbl,
        struct mlx5dr_htbl_connect_info *connect_info,
        bool update_hw_ste);
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
        u16 gvmi,
        struct mlx5dr_domain_rx_tx *nic_dmn,
        struct mlx5dr_ste_htbl *htbl,
        u8 *formatted_ste,
@ -5,91 +5,6 @@
#define MLX5_IFC_DR_H

enum {
        MLX5DR_ACTION_MDFY_HW_FLD_L2_0 = 0,
        MLX5DR_ACTION_MDFY_HW_FLD_L2_1 = 1,
        MLX5DR_ACTION_MDFY_HW_FLD_L2_2 = 2,
        MLX5DR_ACTION_MDFY_HW_FLD_L3_0 = 3,
        MLX5DR_ACTION_MDFY_HW_FLD_L3_1 = 4,
        MLX5DR_ACTION_MDFY_HW_FLD_L3_2 = 5,
        MLX5DR_ACTION_MDFY_HW_FLD_L3_3 = 6,
        MLX5DR_ACTION_MDFY_HW_FLD_L3_4 = 7,
        MLX5DR_ACTION_MDFY_HW_FLD_L4_0 = 8,
        MLX5DR_ACTION_MDFY_HW_FLD_L4_1 = 9,
        MLX5DR_ACTION_MDFY_HW_FLD_MPLS = 10,
        MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_0 = 11,
        MLX5DR_ACTION_MDFY_HW_FLD_REG_0 = 12,
        MLX5DR_ACTION_MDFY_HW_FLD_REG_1 = 13,
        MLX5DR_ACTION_MDFY_HW_FLD_REG_2 = 14,
        MLX5DR_ACTION_MDFY_HW_FLD_REG_3 = 15,
        MLX5DR_ACTION_MDFY_HW_FLD_L4_2 = 16,
        MLX5DR_ACTION_MDFY_HW_FLD_FLEX_0 = 17,
        MLX5DR_ACTION_MDFY_HW_FLD_FLEX_1 = 18,
        MLX5DR_ACTION_MDFY_HW_FLD_FLEX_2 = 19,
        MLX5DR_ACTION_MDFY_HW_FLD_FLEX_3 = 20,
        MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_1 = 21,
        MLX5DR_ACTION_MDFY_HW_FLD_METADATA = 22,
        MLX5DR_ACTION_MDFY_HW_FLD_RESERVED = 23,
};

enum {
        MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1,
        MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
        MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
};

enum {
        MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE = 0x0,
        MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4 = 0x1,
        MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6 = 0x2,
};

enum {
        MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE = 0x0,
        MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP = 0x1,
        MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP = 0x2,
};

enum {
        MLX5DR_STE_LU_TYPE_NOP = 0x00,
        MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
        MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
        MLX5DR_STE_LU_TYPE_ETHL2_DST_O = 0x06,
        MLX5DR_STE_LU_TYPE_ETHL2_DST_I = 0x07,
        MLX5DR_STE_LU_TYPE_ETHL2_DST_D = 0x1b,
        MLX5DR_STE_LU_TYPE_ETHL2_SRC_O = 0x08,
        MLX5DR_STE_LU_TYPE_ETHL2_SRC_I = 0x09,
        MLX5DR_STE_LU_TYPE_ETHL2_SRC_D = 0x1c,
        MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
        MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
        MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
        MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
        MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
        MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
        MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
        MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
        MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
        MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
        MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
        MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
        MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
        MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
        MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
        MLX5DR_STE_LU_TYPE_ETHL4_O = 0x13,
        MLX5DR_STE_LU_TYPE_ETHL4_I = 0x14,
        MLX5DR_STE_LU_TYPE_ETHL4_D = 0x21,
        MLX5DR_STE_LU_TYPE_ETHL4_MISC_O = 0x2c,
        MLX5DR_STE_LU_TYPE_ETHL4_MISC_I = 0x2d,
        MLX5DR_STE_LU_TYPE_ETHL4_MISC_D = 0x2e,
        MLX5DR_STE_LU_TYPE_MPLS_FIRST_O = 0x15,
        MLX5DR_STE_LU_TYPE_MPLS_FIRST_I = 0x24,
        MLX5DR_STE_LU_TYPE_MPLS_FIRST_D = 0x25,
        MLX5DR_STE_LU_TYPE_GRE = 0x16,
        MLX5DR_STE_LU_TYPE_FLEX_PARSER_0 = 0x22,
        MLX5DR_STE_LU_TYPE_FLEX_PARSER_1 = 0x23,
        MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
        MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE = 0x18,
        MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
        MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
        MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f,
};