net/mlx5: DR, Use HW specific logic API when writing STE

STEv0 and STEv1 HW STE formats are different; each lays out
its fields in a different order:
STEv0: CTRL 32B, TAG 16B, BITMASK 16B
STEv1: CTRL 32B, BITMASK 16B, TAG 16B

To make this transparent to the upper layers, we introduce a
new ste_ctx function that formats the STE prior to writing it.

Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Signed-off-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Yevgeny Kliteynik 2020-12-06 18:13:01 +02:00 committed by Saeed Mahameed
parent f06d496985
commit 4fe45e1d31
6 changed files with 60 additions and 13 deletions
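
For illustration, here is a minimal user-space sketch of the two layouts and of
the 16B swap that converts a CPU-ordered STE into the STEv1 HW order. The
DR_STE_* sizes mirror the driver's constants; the helper name is made up for
the example:

#include <stdint.h>
#include <string.h>

#define DR_STE_SIZE_CTRL    32
#define DR_STE_SIZE_TAG     16
#define DR_STE_SIZE_MASK    16
#define DR_STE_SIZE         (DR_STE_SIZE_CTRL + DR_STE_SIZE_TAG + DR_STE_SIZE_MASK)
#define DR_STE_SIZE_REDUCED (DR_STE_SIZE - DR_STE_SIZE_MASK)

/* STEv0 keeps the STE as CTRL | TAG | MASK, while STEv1 HW expects
 * CTRL | MASK | TAG. Since TAG and MASK are the same size, swapping
 * the two 16B regions converts one layout into the other.
 */
static void ste_v1_swap_tag_and_mask(uint8_t *ste)
{
	uint8_t tmp[DR_STE_SIZE_TAG];
	uint8_t *tag = ste + DR_STE_SIZE_CTRL;
	uint8_t *mask = tag + DR_STE_SIZE_TAG;

	memcpy(tmp, tag, DR_STE_SIZE_TAG);
	memcpy(tag, mask, DR_STE_SIZE_MASK);
	memcpy(mask, tmp, DR_STE_SIZE_TAG);
}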

View File

@@ -106,10 +106,6 @@ dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
 	int ret;
 
 	list_del(&ste_info->send_list);
-	ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
-				       ste_info->size, ste_info->offset);
-	if (ret)
-		goto out;
 	/* Copy data to ste, only reduced size or control, the last 16B (mask)
 	 * is already written to the hw.
 	 */
@@ -119,6 +115,11 @@ dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
 	else
 		memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
 
+	ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
+				       ste_info->size, ste_info->offset);
+	if (ret)
+		goto out;
+
 out:
 	kfree(ste_info);
 	return ret;
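
Note the reordering in the two hunks above: mlx5dr_send_postsend_ste() now
re-arranges the data buffer in place for the HW format (see dr_send.c below),
so the copy into ste->hw_ste, which the driver keeps in CPU order, has to be
taken before the send rather than after it.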

View File

@@ -431,6 +431,8 @@ int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
 {
 	struct postsend_info send_info = {};
 
+	mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, data, size);
+
 	send_info.write.addr = (uintptr_t)data;
 	send_info.write.length = size;
 	send_info.write.lkey = 0;
@@ -457,6 +459,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
 	if (ret)
 		return ret;
 
+	mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, formatted_ste, DR_STE_SIZE);
+
 	/* Send the data iteration times */
 	for (i = 0; i < iterations; i++) {
 		u32 ste_index = i * (byte_size / DR_STE_SIZE);
@@ -480,6 +484,10 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
 			/* Copy bit_mask */
 			memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
 			       mask, DR_STE_SIZE_MASK);
+			/* Only when we have mask we need to re-arrange the STE */
+			mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx,
+							data + (j * DR_STE_SIZE),
+							DR_STE_SIZE);
 		}
 	}
 
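
For context, the loop above effectively rebuilds each full STE from the
reduced copy plus its mask and only then re-orders it for the HW. A sketch,
reusing the constants and the hypothetical helper from the example near the
top:

/* Rebuild a full STE from the 48B reduced copy (CTRL + TAG) plus its
 * 16B mask, then re-order it for STEv1 HW.
 */
static void assemble_and_prepare(uint8_t *dst_full, const uint8_t *reduced,
				 const uint8_t *mask)
{
	memcpy(dst_full, reduced, DR_STE_SIZE_REDUCED);
	memcpy(dst_full + DR_STE_SIZE_REDUCED, mask, DR_STE_SIZE_MASK);
	ste_v1_swap_tag_and_mask(dst_full); /* skipped on STEv0 */
}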
@@ -509,6 +517,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
 	u32 byte_size = htbl->chunk->byte_size;
 	int iterations;
 	int num_stes;
+	u8 *copy_dst;
 	u8 *data;
 	int ret;
 	int i;
@@ -518,20 +527,22 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
 	if (ret)
 		return ret;
 
-	for (i = 0; i < num_stes; i++) {
-		u8 *copy_dst;
-
-		/* Copy the same ste on the data buffer */
-		copy_dst = data + i * DR_STE_SIZE;
-		memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
-
-		if (update_hw_ste) {
-			/* Copy the reduced ste to hash table ste_arr */
+	if (update_hw_ste) {
+		/* Copy the reduced STE to hash table ste_arr */
+		for (i = 0; i < num_stes; i++) {
 			copy_dst = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
 			memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
 		}
 	}
 
+	mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, ste_init_data, DR_STE_SIZE);
+
+	/* Copy the same STE on the data buffer */
+	for (i = 0; i < num_stes; i++) {
+		copy_dst = data + i * DR_STE_SIZE;
+		memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
+	}
+
 	/* Send the data iteration times */
 	for (i = 0; i < iterations; i++) {
 		u32 ste_index = i * (byte_size / DR_STE_SIZE);

View File

@@ -356,6 +356,13 @@ void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
 	ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
 }
 
+void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
+				     u8 *hw_ste_p, u32 ste_size)
+{
+	if (ste_ctx->prepare_for_postsend)
+		ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
+}
+
 /* Init one ste as a pattern for ste data array */
 void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
 				  u16 gvmi,

View File

@@ -160,6 +160,9 @@ struct mlx5dr_ste_ctx {
 				      u8 *hw_action,
 				      u32 hw_action_sz,
 				      u16 *used_hw_action_num);
+
+	/* Send */
+	void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
 };
 
 extern struct mlx5dr_ste_ctx ste_ctx_v0;

View File

@@ -324,6 +324,26 @@ static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
 	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
 }
 
+static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p,
+					   u32 ste_size)
+{
+	u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
+	u8 *mask = tag + DR_STE_SIZE_TAG;
+	u8 tmp_tag[DR_STE_SIZE_TAG] = {};
+
+	if (ste_size == DR_STE_SIZE_CTRL)
+		return;
+
+	WARN_ON(ste_size != DR_STE_SIZE);
+
+	/* Backup tag */
+	memcpy(tmp_tag, tag, DR_STE_SIZE_TAG);
+
+	/* Swap mask and tag both are the same size */
+	memcpy(tag, mask, DR_STE_SIZE_MASK);
+	memcpy(mask, tmp_tag, DR_STE_SIZE_TAG);
+}
+
 static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag)
 {
 	MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id,
@@ -1608,4 +1628,6 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
 	.set_action_add = &dr_ste_v1_set_action_add,
 	.set_action_copy = &dr_ste_v1_set_action_copy,
 	.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
+	/* Send */
+	.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
 };
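
Note that ste_ctx_v0 is untouched by this patch: it leaves .prepare_for_postsend
NULL, so the mlx5dr_ste_prepare_for_postsend() wrapper in dr_ste.c turns the
call into a no-op for STEv0, whose in-memory layout already matches the HW one.
Only STEv1 pays for the extra swap.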

View File

@@ -1072,6 +1072,9 @@ struct mlx5dr_icm_chunk *
 mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
 		       enum mlx5dr_icm_chunk_size chunk_size);
 void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
+
+void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
+				     u8 *hw_ste_p, u32 ste_size);
 int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
 				      struct mlx5dr_domain_rx_tx *nic_dmn,
 				      struct mlx5dr_ste_htbl *htbl,