mlxsw: Revert "Prepare for XM implementation - LPM trees"
This reverts commit 923ba95ea2 ("Merge branch 'mlxsw-spectrum-prepare-for-xm-implementation-lpm-trees'").
Signed-off-by: Petr Machata <petrm@nvidia.com>
Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 725ff53204
commit 87c0a3c676
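For orientation before the diff: each of the XRALXX registers removed below follows the same pattern — the XM register is a small envelope whose payload embeds the original RALXX register at a fixed offset (0x04), so the XRALXX pack helper just zeroes the envelope and calls the existing RALXX pack helper on payload + offset. The following is a minimal, self-contained C sketch of that layout trick; it is not the kernel code, and the names and field encodings are invented for illustration only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical sizes mirroring MLXSW_REG_XRALTA_LEN (0x08) and the
 * RALTA offset (0x04) used by the removed mlxsw_reg_xralta_pack(). */
#define XRALTA_LEN		0x08
#define XRALTA_RALTA_OFFSET	0x04

/* Stand-in for mlxsw_reg_ralta_pack(): packs the embedded register.
 * The field encoding here is made up for the example. */
static void ralta_pack(char *payload, int alloc, uint8_t tree_id)
{
	payload[0] = alloc ? 1 : 0;	/* allocation opcode bit */
	payload[1] = tree_id;		/* LPM tree ID */
}

/* Stand-in for mlxsw_reg_xralta_pack(): zero the envelope, then pack
 * the embedded RALTA register at its fixed offset. */
static void xralta_pack(char *payload, int alloc, uint8_t tree_id)
{
	memset(payload, 0, XRALTA_LEN);
	ralta_pack(payload + XRALTA_RALTA_OFFSET, alloc, tree_id);
}

int main(void)
{
	char xralta_pl[XRALTA_LEN];

	xralta_pack(xralta_pl, 1, 3);
	/* The inner payload sits at offset 0x04 of the envelope. */
	printf("alloc=%d tree_id=%d\n",
	       xralta_pl[XRALTA_RALTA_OFFSET],
	       xralta_pl[XRALTA_RALTA_OFFSET + 1]);
	return 0;
}

Built as an ordinary userspace program this prints "alloc=1 tree_id=3", showing the inner register contents landing at the envelope's 0x04 offset, which is the arrangement the removed mlxsw_reg_xralta_pack() relies on.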
drivers/net/ethernet/mellanox/mlxsw/reg.h

@@ -8924,86 +8924,6 @@ mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
 	mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
 }
 
-/* Note that XRALXX register position violates the rule of ordering register
- * definition by the ID. However, XRALXX pack helpers are using RALXX pack
- * helpers, RALXX registers have higher IDs.
- */
-
-/* XRALTA - XM Router Algorithmic LPM Tree Allocation Register
- * -----------------------------------------------------------
- * The XRALTA is used to allocate the XLT LPM trees.
- *
- * This register embeds original RALTA register.
- */
-#define MLXSW_REG_XRALTA_ID 0x7811
-#define MLXSW_REG_XRALTA_LEN 0x08
-#define MLXSW_REG_XRALTA_RALTA_OFFSET 0x04
-
-MLXSW_REG_DEFINE(xralta, MLXSW_REG_XRALTA_ID, MLXSW_REG_XRALTA_LEN);
-
-static inline void mlxsw_reg_xralta_pack(char *payload, bool alloc,
-					 enum mlxsw_reg_ralxx_protocol protocol,
-					 u8 tree_id)
-{
-	char *ralta_payload = payload + MLXSW_REG_XRALTA_RALTA_OFFSET;
-
-	MLXSW_REG_ZERO(xralta, payload);
-	mlxsw_reg_ralta_pack(ralta_payload, alloc, protocol, tree_id);
-}
-
-/* XRALST - XM Router Algorithmic LPM Structure Tree Register
- * ----------------------------------------------------------
- * The XRALST is used to set and query the structure of an XLT LPM tree.
- *
- * This register embeds original RALST register.
- */
-#define MLXSW_REG_XRALST_ID 0x7812
-#define MLXSW_REG_XRALST_LEN 0x108
-#define MLXSW_REG_XRALST_RALST_OFFSET 0x04
-
-MLXSW_REG_DEFINE(xralst, MLXSW_REG_XRALST_ID, MLXSW_REG_XRALST_LEN);
-
-static inline void mlxsw_reg_xralst_pack(char *payload, u8 root_bin, u8 tree_id)
-{
-	char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
-
-	MLXSW_REG_ZERO(xralst, payload);
-	mlxsw_reg_ralst_pack(ralst_payload, root_bin, tree_id);
-}
-
-static inline void mlxsw_reg_xralst_bin_pack(char *payload, u8 bin_number,
-					     u8 left_child_bin,
-					     u8 right_child_bin)
-{
-	char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
-
-	mlxsw_reg_ralst_bin_pack(ralst_payload, bin_number, left_child_bin,
-				 right_child_bin);
-}
-
-/* XRALTB - XM Router Algorithmic LPM Tree Binding Register
- * --------------------------------------------------------
- * The XRALTB register is used to bind virtual router and protocol
- * to an allocated LPM tree.
- *
- * This register embeds original RALTB register.
- */
-#define MLXSW_REG_XRALTB_ID 0x7813
-#define MLXSW_REG_XRALTB_LEN 0x08
-#define MLXSW_REG_XRALTB_RALTB_OFFSET 0x04
-
-MLXSW_REG_DEFINE(xraltb, MLXSW_REG_XRALTB_ID, MLXSW_REG_XRALTB_LEN);
-
-static inline void mlxsw_reg_xraltb_pack(char *payload, u16 virtual_router,
-					 enum mlxsw_reg_ralxx_protocol protocol,
-					 u8 tree_id)
-{
-	char *raltb_payload = payload + MLXSW_REG_XRALTB_RALTB_OFFSET;
-
-	MLXSW_REG_ZERO(xraltb, payload);
-	mlxsw_reg_raltb_pack(raltb_payload, virtual_router, protocol, tree_id);
-}
-
 /* MFCR - Management Fan Control Register
  * --------------------------------------
  * This register controls the settings of the Fan Speed PWM mechanism.
@@ -12510,9 +12430,6 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
 	MLXSW_REG(rigr2),
 	MLXSW_REG(recr2),
 	MLXSW_REG(rmft2),
-	MLXSW_REG(xralta),
-	MLXSW_REG(xralst),
-	MLXSW_REG(xraltb),
 	MLXSW_REG(mfcr),
 	MLXSW_REG(mfsc),
 	MLXSW_REG(mfsm),
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c

@@ -484,7 +484,6 @@ struct mlxsw_sp_fib {
 	struct mlxsw_sp_vr *vr;
 	struct mlxsw_sp_lpm_tree *lpm_tree;
 	enum mlxsw_sp_l3proto proto;
-	const struct mlxsw_sp_router_ll_ops *ll_ops;
 };
 
 struct mlxsw_sp_vr {
@@ -498,31 +497,12 @@ struct mlxsw_sp_vr {
 	refcount_t ul_rif_refcnt;
 };
 
-static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
-{
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
-			       xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
-}
-
-static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
-{
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
-			       xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
-}
-
-static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
-{
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
-			       xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
-}
-
 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
 
 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
 						struct mlxsw_sp_vr *vr,
 						enum mlxsw_sp_l3proto proto)
 {
-	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
 	struct mlxsw_sp_lpm_tree *lpm_tree;
 	struct mlxsw_sp_fib *fib;
 	int err;
@@ -538,7 +518,6 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
 	fib->proto = proto;
 	fib->vr = vr;
 	fib->lpm_tree = lpm_tree;
-	fib->ll_ops = ll_ops;
 	mlxsw_sp_lpm_tree_hold(lpm_tree);
 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
 	if (err)
@@ -577,36 +556,33 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
 }
 
 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
-				   const struct mlxsw_sp_router_ll_ops *ll_ops,
 				   struct mlxsw_sp_lpm_tree *lpm_tree)
 {
-	char xralta_pl[MLXSW_REG_XRALTA_LEN];
+	char ralta_pl[MLXSW_REG_RALTA_LEN];
 
-	mlxsw_reg_xralta_pack(xralta_pl, true,
-			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
-			      lpm_tree->id);
-	return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
+	mlxsw_reg_ralta_pack(ralta_pl, true,
+			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+			     lpm_tree->id);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
 }
 
 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
-				   const struct mlxsw_sp_router_ll_ops *ll_ops,
 				   struct mlxsw_sp_lpm_tree *lpm_tree)
 {
-	char xralta_pl[MLXSW_REG_XRALTA_LEN];
+	char ralta_pl[MLXSW_REG_RALTA_LEN];
 
-	mlxsw_reg_xralta_pack(xralta_pl, false,
-			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
-			      lpm_tree->id);
-	ll_ops->ralta_write(mlxsw_sp, xralta_pl);
+	mlxsw_reg_ralta_pack(ralta_pl, false,
+			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+			     lpm_tree->id);
+	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
 }
 
 static int
 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
-				  const struct mlxsw_sp_router_ll_ops *ll_ops,
 				  struct mlxsw_sp_prefix_usage *prefix_usage,
 				  struct mlxsw_sp_lpm_tree *lpm_tree)
 {
-	char xralst_pl[MLXSW_REG_XRALST_LEN];
+	char ralst_pl[MLXSW_REG_RALST_LEN];
 	u8 root_bin = 0;
 	u8 prefix;
 	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
@@ -614,20 +590,19 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
 		root_bin = prefix;
 
-	mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
+	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
 		if (prefix == 0)
 			continue;
-		mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
-					  MLXSW_REG_RALST_BIN_NO_CHILD);
+		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
+					 MLXSW_REG_RALST_BIN_NO_CHILD);
 		last_prefix = prefix;
 	}
-	return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
 }
 
 static struct mlxsw_sp_lpm_tree *
 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
-			 const struct mlxsw_sp_router_ll_ops *ll_ops,
 			 struct mlxsw_sp_prefix_usage *prefix_usage,
 			 enum mlxsw_sp_l3proto proto)
 {
@@ -638,11 +613,12 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
 	if (!lpm_tree)
 		return ERR_PTR(-EBUSY);
 	lpm_tree->proto = proto;
-	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
+	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
 	if (err)
 		return ERR_PTR(err);
 
-	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
+	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
+						lpm_tree);
 	if (err)
 		goto err_left_struct_set;
 	memcpy(&lpm_tree->prefix_usage, prefix_usage,
@@ -653,15 +629,14 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
 	return lpm_tree;
 
 err_left_struct_set:
-	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
+	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
 	return ERR_PTR(err);
 }
 
 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
-				      const struct mlxsw_sp_router_ll_ops *ll_ops,
 				      struct mlxsw_sp_lpm_tree *lpm_tree)
 {
-	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
+	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
 }
 
 static struct mlxsw_sp_lpm_tree *
@@ -669,7 +644,6 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
 			 struct mlxsw_sp_prefix_usage *prefix_usage,
 			 enum mlxsw_sp_l3proto proto)
 {
-	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
 	struct mlxsw_sp_lpm_tree *lpm_tree;
 	int i;
 
@@ -683,7 +657,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
 			return lpm_tree;
 		}
 	}
-	return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
+	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
 }
 
 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
@@ -694,11 +668,8 @@ static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
 				  struct mlxsw_sp_lpm_tree *lpm_tree)
 {
-	const struct mlxsw_sp_router_ll_ops *ll_ops =
-		mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
-
 	if (--lpm_tree->ref_count == 0)
-		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
+		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
 }
 
 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
@@ -788,23 +759,23 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
 				     const struct mlxsw_sp_fib *fib, u8 tree_id)
 {
-	char xraltb_pl[MLXSW_REG_XRALTB_LEN];
+	char raltb_pl[MLXSW_REG_RALTB_LEN];
 
-	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
-			      (enum mlxsw_reg_ralxx_protocol) fib->proto,
-			      tree_id);
-	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
+	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
+			     tree_id);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
 }
 
 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
 				       const struct mlxsw_sp_fib *fib)
 {
-	char xraltb_pl[MLXSW_REG_XRALTB_LEN];
+	char raltb_pl[MLXSW_REG_RALTB_LEN];
 
 	/* Bind to tree 0 which is default */
-	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
-			      (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
-	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
+	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
 }
 
 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
@@ -10245,12 +10216,6 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
 }
 
-static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
-	.ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
-	.ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
-	.raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
-};
-
 static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
 {
 	u16 lb_rif_index;
@@ -10324,9 +10289,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		goto err_router_ops_init;
 
-	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_router_ll_basic_ops;
-	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
-
 	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
 	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
 			  mlxsw_sp_nh_grp_activity_work);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h

@@ -51,8 +51,6 @@ struct mlxsw_sp_router {
 	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
 	struct mlxsw_sp_router_nve_decap nve_decap_config;
 	struct mutex lock; /* Protects shared router resources */
-	/* One set of ops for each protocol: IPv4 and IPv6 */
-	const struct mlxsw_sp_router_ll_ops *proto_ll_ops[MLXSW_SP_L3_PROTO_MAX];
 	struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx;
 	u16 lb_rif_index;
 	const struct mlxsw_sp_adj_grp_size_range *adj_grp_size_ranges;
@@ -64,15 +62,6 @@ struct mlxsw_sp_router {
 	u32 adj_trap_index;
 };
 
-/* Low-level router ops. Basically this is to handle the different
- * register sets to work with ordinary and XM trees and FIB entries.
- */
-struct mlxsw_sp_router_ll_ops {
-	int (*ralta_write)(struct mlxsw_sp *mlxsw_sp, char *xralta_pl);
-	int (*ralst_write)(struct mlxsw_sp *mlxsw_sp, char *xralst_pl);
-	int (*raltb_write)(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl);
-};
-
 struct mlxsw_sp_rif_ipip_lb;
 struct mlxsw_sp_rif_ipip_lb_config {
 	enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
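Taken together, the revert undoes one indirection: before it, LPM tree allocation, structure, and binding writes went through the per-protocol mlxsw_sp_router_ll_ops callbacks (so an XM-specific implementation could later be plugged in behind the same calls); after it, the code packs the plain RALTA/RALST/RALTB registers and writes them directly with mlxsw_reg_write(). The sketch below is a generic, self-contained C illustration of that ops-table dispatch versus a direct call; the names are invented for the example and are not the driver's API.

#include <stdio.h>

/* Invented stand-in for a direct register write path. */
static int reg_write_ralta(const char *payload)
{
	printf("direct RALTA write: %s\n", payload);
	return 0;
}

/* Per-protocol low-level ops table, analogous to the removed
 * struct mlxsw_sp_router_ll_ops with its ralta_write() callback. */
struct router_ll_ops {
	int (*ralta_write)(const char *payload);
};

static const struct router_ll_ops basic_ops = {
	.ralta_write = reg_write_ralta,
};

int main(void)
{
	/* Before the revert: dispatch through the ops table. */
	basic_ops.ralta_write("tree alloc via ll_ops");

	/* After the revert: call the register write directly. */
	reg_write_ralta("tree alloc direct");
	return 0;
}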