net/mlx5: DR, Add cache for modify header pattern
Starting with ConnectX-6 Dx, we use a new design of the modify_header FW object. The current modify_header object allows only a limited number of FW objects, so the new pattern-and-argument design enables pattern reuse, which saves memory and allows a large number of modify_header objects.

Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
commit da5d0027d6 (parent b47dddc624)
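At its core, the patch adds a refcounted, mutex-protected cache of modify-header patterns: mlx5dr_ptrn_cache_get_pattern() looks up a pattern with the same action list and reuses it, otherwise it allocates an ICM chunk, masks out the per-rule argument bits and writes the pattern to the device; mlx5dr_ptrn_cache_put_pattern() drops the reference and frees the entry when the last user is gone. The following user-space sketch illustrates only that find-or-create/refcount scheme under simplified assumptions: struct ptrn_obj, ptrn_cache_get() and ptrn_cache_put() are illustrative names, locking is omitted, and the ICM allocation, argument masking and FW post-send of the real driver are replaced by plain heap operations.

/* Minimal user-space sketch of the refcounted pattern cache idea.
 * NOT the driver code: names are illustrative and hardware steps are
 * replaced by malloc()/memcpy().
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ptrn_obj {
        uint8_t *data;          /* cached (masked) action list */
        size_t num_of_actions;
        unsigned int refcount;
        struct ptrn_obj *next;  /* singly linked cache list */
};

static struct ptrn_obj *cache_head;

/* Find a pattern with an identical action list; on hit, bump the refcount.
 * On miss, allocate a new entry and insert it at the head of the list,
 * mirroring dr_ptrn_alloc_pattern() + list_add() in the patch.
 */
static struct ptrn_obj *ptrn_cache_get(const uint8_t *data, size_t n, size_t action_sz)
{
        struct ptrn_obj *p;

        for (p = cache_head; p; p = p->next) {
                if (p->num_of_actions == n &&
                    !memcmp(p->data, data, n * action_sz)) {
                        p->refcount++;  /* reuse cached pattern */
                        return p;
                }
        }

        p = calloc(1, sizeof(*p));
        if (!p)
                return NULL;
        p->data = malloc(n * action_sz);
        if (!p->data) {
                free(p);
                return NULL;
        }
        memcpy(p->data, data, n * action_sz);
        p->num_of_actions = n;
        p->refcount = 1;
        p->next = cache_head;
        cache_head = p;
        return p;
}

/* Drop a reference; unlink and free the entry when the last user is gone. */
static void ptrn_cache_put(struct ptrn_obj *pattern)
{
        struct ptrn_obj **pp;

        if (--pattern->refcount)
                return;
        for (pp = &cache_head; *pp; pp = &(*pp)->next) {
                if (*pp == pattern) {
                        *pp = pattern->next;
                        break;
                }
        }
        free(pattern->data);
        free(pattern);
}

int main(void)
{
        uint8_t actions[16] = { 0x06, 0x01 };   /* two fake 8-byte actions */
        struct ptrn_obj *a = ptrn_cache_get(actions, 2, 8);
        struct ptrn_obj *b = ptrn_cache_get(actions, 2, 8);

        printf("same pattern reused: %s\n", a == b ? "yes" : "no");
        ptrn_cache_put(b);
        ptrn_cache_put(a);
        return 0;
}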
@@ -2,12 +2,198 @@
// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "dr_types.h"
#include "mlx5_ifc_dr_ste_v1.h"

enum dr_ptrn_modify_hdr_action_id {
        DR_PTRN_MODIFY_HDR_ACTION_ID_NOP = 0x00,
        DR_PTRN_MODIFY_HDR_ACTION_ID_COPY = 0x05,
        DR_PTRN_MODIFY_HDR_ACTION_ID_SET = 0x06,
        DR_PTRN_MODIFY_HDR_ACTION_ID_ADD = 0x07,
        DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE = 0x0a,
};

struct mlx5dr_ptrn_mgr {
        struct mlx5dr_domain *dmn;
        struct mlx5dr_icm_pool *ptrn_icm_pool;
        /* cache for modify_header ptrn */
        struct list_head ptrn_list;
        struct mutex modify_hdr_mutex; /* protect the pattern cache */
};

/* Cache structure and functions */
static bool dr_ptrn_compare_modify_hdr(size_t cur_num_of_actions,
                                       __be64 cur_hw_actions[],
                                       size_t num_of_actions,
                                       __be64 hw_actions[])
{
        int i;

        if (cur_num_of_actions != num_of_actions)
                return false;

        for (i = 0; i < num_of_actions; i++) {
                u8 action_id =
                        MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id);

                if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_COPY) {
                        if (hw_actions[i] != cur_hw_actions[i])
                                return false;
                } else {
                        if ((__force __be32)hw_actions[i] !=
                            (__force __be32)cur_hw_actions[i])
                                return false;
                }
        }

        return true;
}

static struct mlx5dr_ptrn_obj *
dr_ptrn_find_cached_pattern(struct mlx5dr_ptrn_mgr *mgr,
                            size_t num_of_actions,
                            __be64 hw_actions[])
{
        struct mlx5dr_ptrn_obj *cached_pattern;
        struct mlx5dr_ptrn_obj *tmp;

        list_for_each_entry_safe(cached_pattern, tmp, &mgr->ptrn_list, list) {
                if (dr_ptrn_compare_modify_hdr(cached_pattern->num_of_actions,
                                               (__be64 *)cached_pattern->data,
                                               num_of_actions,
                                               hw_actions)) {
                        /* Put this pattern in the head of the list,
                         * as we will probably use it more.
                         */
                        list_del_init(&cached_pattern->list);
                        list_add(&cached_pattern->list, &mgr->ptrn_list);
                        return cached_pattern;
                }
        }

        return NULL;
}

static struct mlx5dr_ptrn_obj *
dr_ptrn_alloc_pattern(struct mlx5dr_ptrn_mgr *mgr,
                      u16 num_of_actions, u8 *data)
{
        struct mlx5dr_ptrn_obj *pattern;
        struct mlx5dr_icm_chunk *chunk;
        u32 chunk_size;
        u32 index;

        chunk_size = ilog2(num_of_actions);
        /* HW modify action index granularity is at least 64B */
        chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);

        chunk = mlx5dr_icm_alloc_chunk(mgr->ptrn_icm_pool, chunk_size);
        if (!chunk)
                return NULL;

        index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) -
                 mgr->dmn->info.caps.hdr_modify_pattern_icm_addr) /
                DR_ACTION_CACHE_LINE_SIZE;

        pattern = kzalloc(sizeof(*pattern), GFP_KERNEL);
        if (!pattern)
                goto free_chunk;

        pattern->data = kzalloc(num_of_actions * DR_MODIFY_ACTION_SIZE *
                                sizeof(*pattern->data), GFP_KERNEL);
        if (!pattern->data)
                goto free_pattern;

        memcpy(pattern->data, data, num_of_actions * DR_MODIFY_ACTION_SIZE);
        pattern->chunk = chunk;
        pattern->index = index;
        pattern->num_of_actions = num_of_actions;

        list_add(&pattern->list, &mgr->ptrn_list);
        refcount_set(&pattern->refcount, 1);

        return pattern;

free_pattern:
        kfree(pattern);
free_chunk:
        mlx5dr_icm_free_chunk(chunk);
        return NULL;
}

static void
dr_ptrn_free_pattern(struct mlx5dr_ptrn_obj *pattern)
{
        list_del(&pattern->list);
        mlx5dr_icm_free_chunk(pattern->chunk);
        kfree(pattern->data);
        kfree(pattern);
}

struct mlx5dr_ptrn_obj *
mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr,
                              u16 num_of_actions,
                              u8 *data)
{
        struct mlx5dr_ptrn_obj *pattern;
        u64 *hw_actions;
        u8 action_id;
        int i;

        mutex_lock(&mgr->modify_hdr_mutex);
        pattern = dr_ptrn_find_cached_pattern(mgr,
                                              num_of_actions,
                                              (__be64 *)data);
        if (!pattern) {
                /* Alloc and add new pattern to cache */
                pattern = dr_ptrn_alloc_pattern(mgr, num_of_actions, data);
                if (!pattern)
                        goto out_unlock;

                hw_actions = (u64 *)pattern->data;
                /* Here we mask the pattern data to create a valid pattern
                 * since we do an OR operation between the arg and pattern
                 */
                for (i = 0; i < num_of_actions; i++) {
                        action_id = MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id);

                        if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_SET ||
                            action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_ADD ||
                            action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE)
                                MLX5_SET(ste_double_action_set_v1, &hw_actions[i], inline_data, 0);
                }

                if (mlx5dr_send_postsend_pattern(mgr->dmn, pattern->chunk,
                                                 num_of_actions, pattern->data)) {
                        refcount_dec(&pattern->refcount);
                        goto free_pattern;
                }
        } else {
                refcount_inc(&pattern->refcount);
        }

        mutex_unlock(&mgr->modify_hdr_mutex);

        return pattern;

free_pattern:
        dr_ptrn_free_pattern(pattern);
out_unlock:
        mutex_unlock(&mgr->modify_hdr_mutex);
        return NULL;
}

void
mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr,
                              struct mlx5dr_ptrn_obj *pattern)
{
        mutex_lock(&mgr->modify_hdr_mutex);

        if (refcount_dec_and_test(&pattern->refcount))
                dr_ptrn_free_pattern(pattern);

        mutex_unlock(&mgr->modify_hdr_mutex);
}

struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn)
{
        struct mlx5dr_ptrn_mgr *mgr;
@@ -26,6 +212,7 @@ struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn)
                goto free_mgr;
        }

        INIT_LIST_HEAD(&mgr->ptrn_list);
        return mgr;

free_mgr:
@@ -35,9 +222,20 @@ free_mgr:

void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr)
{
        struct mlx5dr_ptrn_obj *pattern;
        struct mlx5dr_ptrn_obj *tmp;

        if (!mgr)
                return;

        WARN_ON(!list_empty(&mgr->ptrn_list));

        list_for_each_entry_safe(pattern, tmp, &mgr->ptrn_list, list) {
                list_del(&pattern->list);
                kfree(pattern->data);
                kfree(pattern);
        }

        mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool);
        kfree(mgr);
}
@@ -754,6 +754,26 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
        return dr_postsend_icm_data(dmn, &send_info);
}

int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn,
                                 struct mlx5dr_icm_chunk *chunk,
                                 u16 num_of_actions,
                                 u8 *data)
{
        struct postsend_info send_info = {};
        int ret;

        send_info.write.addr = (uintptr_t)data;
        send_info.write.length = num_of_actions * DR_MODIFY_ACTION_SIZE;
        send_info.remote_addr = mlx5dr_icm_pool_get_chunk_mr_addr(chunk);
        send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(chunk);

        ret = dr_postsend_icm_data(dmn, &send_info);
        if (ret)
                return ret;

        return 0;
}

static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
                                 struct mlx5dr_qp *dr_qp,
                                 int port)
@@ -1002,6 +1002,15 @@ struct mlx5dr_ste_action_modify_field {
        u8 l4_type;
};

struct mlx5dr_ptrn_obj {
        struct mlx5dr_icm_chunk *chunk;
        u8 *data;
        u16 num_of_actions;
        u32 index;
        refcount_t refcount;
        struct list_head list;
};

struct mlx5dr_action_rewrite {
        struct mlx5dr_domain *dmn;
        struct mlx5dr_icm_chunk *chunk;
@@ -1011,6 +1020,7 @@ struct mlx5dr_action_rewrite {
        u8 allow_rx:1;
        u8 allow_tx:1;
        u8 modify_ttl:1;
        struct mlx5dr_ptrn_obj *ptrn;
};

struct mlx5dr_action_reformat {
@@ -1448,6 +1458,10 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
                                        bool update_hw_ste);
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
                                struct mlx5dr_action *action);
int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn,
                                 struct mlx5dr_icm_chunk *chunk,
                                 u16 num_of_actions,
                                 u8 *data);

int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn);
void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn);
@@ -1537,5 +1551,9 @@ static inline bool mlx5dr_supp_match_ranges(struct mlx5_core_dev *dev)
bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn);
struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn);
void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr);
struct mlx5dr_ptrn_obj *mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr,
                                                      u16 num_of_actions, u8 *data);
void mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr,
                                   struct mlx5dr_ptrn_obj *pattern);

#endif /* _DR_TYPES_H_ */