Merge branch 'per-nexthop-offload'
Jiri Pirko says: ==================== ipv4: fib: Provide per-nexthop offload indication Ido says: Offload indication for IPv4 routes is currently set in the FIB info's flags. When multipath routes are employed, this can lead to a route being marked as offloaded although only one of its nexthops is actually offloaded. Instead, this patchset aims to provide a higher resolution for the offload indication and report it on a per-nexthop basis. Example output from patched iproute: $ ip route show 192.168.200.0/24 192.168.200.0/24 nexthop via 192.168.100.2 dev enp3s0np7 weight 1 offload nexthop via 192.168.101.3 dev enp3s0np8 weight 1 And once the second gateway is resolved: $ ip route show 192.168.200.0/24 192.168.200.0/24 nexthop via 192.168.100.2 dev enp3s0np7 weight 1 offload nexthop via 192.168.101.3 dev enp3s0np8 weight 1 offload First patch teaches the kernel to look for the offload indication in the nexthop flags. Patches 2-5 adjust current capable drivers to provide offload indication on a per-nexthop basis. Last patch removes no longer used functions to set offload indication in the FIB info's flags. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
5a4d148f0d
|
@ -394,7 +394,6 @@ struct mlxsw_sp_fib_entry {
|
|||
enum mlxsw_sp_fib_entry_type type;
|
||||
struct list_head nexthop_group_node;
|
||||
struct mlxsw_sp_nexthop_group *nh_group;
|
||||
bool offloaded;
|
||||
};
|
||||
|
||||
struct mlxsw_sp_fib4_entry {
|
||||
|
@ -1653,6 +1652,24 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
|
||||
enum mlxsw_reg_ralue_op op, int err);
|
||||
|
||||
static void
|
||||
mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
|
||||
{
|
||||
enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
|
||||
struct mlxsw_sp_fib_entry *fib_entry;
|
||||
|
||||
list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
|
||||
if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
|
||||
fib_entry))
|
||||
continue;
|
||||
mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_nexthop_group *nh_grp)
|
||||
|
@ -1740,6 +1757,10 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
|
|||
dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
|
||||
goto set_trap;
|
||||
}
|
||||
|
||||
/* Offload state within the group changed, so update the flags. */
|
||||
mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
|
||||
|
||||
return;
|
||||
|
||||
set_trap:
|
||||
|
@ -2103,13 +2124,45 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
|
|||
}
|
||||
}
|
||||
|
||||
static void
|
||||
mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
|
||||
{
|
||||
struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
|
||||
int i;
|
||||
|
||||
if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
|
||||
nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < nh_grp->count; i++) {
|
||||
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
|
||||
|
||||
if (nh->offloaded)
|
||||
nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
|
||||
else
|
||||
nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
|
||||
{
|
||||
struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nh_grp->count; i++) {
|
||||
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
|
||||
|
||||
nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
|
||||
}
|
||||
}
|
||||
|
||||
static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
|
||||
{
|
||||
fib_entry->offloaded = true;
|
||||
|
||||
switch (fib_entry->fib_node->fib->proto) {
|
||||
case MLXSW_SP_L3_PROTO_IPV4:
|
||||
fib_info_offload_inc(fib_entry->nh_group->key.fi);
|
||||
mlxsw_sp_fib4_entry_offload_set(fib_entry);
|
||||
break;
|
||||
case MLXSW_SP_L3_PROTO_IPV6:
|
||||
WARN_ON_ONCE(1);
|
||||
|
@ -2121,13 +2174,11 @@ mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
|
|||
{
|
||||
switch (fib_entry->fib_node->fib->proto) {
|
||||
case MLXSW_SP_L3_PROTO_IPV4:
|
||||
fib_info_offload_dec(fib_entry->nh_group->key.fi);
|
||||
mlxsw_sp_fib4_entry_offload_unset(fib_entry);
|
||||
break;
|
||||
case MLXSW_SP_L3_PROTO_IPV6:
|
||||
WARN_ON_ONCE(1);
|
||||
}
|
||||
|
||||
fib_entry->offloaded = false;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -2136,17 +2187,13 @@ mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
|
|||
{
|
||||
switch (op) {
|
||||
case MLXSW_REG_RALUE_OP_WRITE_DELETE:
|
||||
if (!fib_entry->offloaded)
|
||||
return;
|
||||
return mlxsw_sp_fib_entry_offload_unset(fib_entry);
|
||||
case MLXSW_REG_RALUE_OP_WRITE_WRITE:
|
||||
if (err)
|
||||
return;
|
||||
if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
|
||||
!fib_entry->offloaded)
|
||||
if (mlxsw_sp_fib_entry_should_offload(fib_entry))
|
||||
mlxsw_sp_fib_entry_offload_set(fib_entry);
|
||||
else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
|
||||
fib_entry->offloaded)
|
||||
else if (!mlxsw_sp_fib_entry_should_offload(fib_entry))
|
||||
mlxsw_sp_fib_entry_offload_unset(fib_entry);
|
||||
return;
|
||||
default:
|
||||
|
|
|
@ -2761,7 +2761,7 @@ static int ofdpa_fib4_add(struct rocker *rocker,
|
|||
fen_info->tb_id, 0);
|
||||
if (err)
|
||||
return err;
|
||||
fib_info_offload_inc(fen_info->fi);
|
||||
fen_info->fi->fib_nh->nh_flags |= RTNH_F_OFFLOAD;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2776,7 +2776,7 @@ static int ofdpa_fib4_del(struct rocker *rocker,
|
|||
ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
|
||||
if (!ofdpa_port)
|
||||
return 0;
|
||||
fib_info_offload_dec(fen_info->fi);
|
||||
fen_info->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
|
||||
return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
|
||||
fen_info->dst_len, fen_info->fi,
|
||||
fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
|
||||
|
@ -2803,7 +2803,7 @@ static void ofdpa_fib4_abort(struct rocker *rocker)
|
|||
rocker);
|
||||
if (!ofdpa_port)
|
||||
continue;
|
||||
fib_info_offload_dec(flow_entry->fi);
|
||||
flow_entry->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
|
||||
ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
|
||||
flow_entry);
|
||||
}
|
||||
|
|
|
@ -124,7 +124,6 @@ struct fib_info {
|
|||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
|
||||
int fib_weight;
|
||||
#endif
|
||||
unsigned int fib_offload_cnt;
|
||||
struct rcu_head rcu;
|
||||
struct fib_nh fib_nh[0];
|
||||
#define fib_dev fib_nh[0].nh_dev
|
||||
|
@ -177,18 +176,6 @@ struct fib_result_nl {
|
|||
|
||||
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
|
||||
|
||||
static inline void fib_info_offload_inc(struct fib_info *fi)
|
||||
{
|
||||
fi->fib_offload_cnt++;
|
||||
fi->fib_flags |= RTNH_F_OFFLOAD;
|
||||
}
|
||||
|
||||
static inline void fib_info_offload_dec(struct fib_info *fi)
|
||||
{
|
||||
if (--fi->fib_offload_cnt == 0)
|
||||
fi->fib_flags &= ~RTNH_F_OFFLOAD;
|
||||
}
|
||||
|
||||
#define FIB_RES_SADDR(net, res) \
|
||||
((FIB_RES_NH(res).nh_saddr_genid == \
|
||||
atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
|
||||
|
|
|
@ -1342,6 +1342,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
|
|||
IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
|
||||
rtm->rtm_flags |= RTNH_F_DEAD;
|
||||
}
|
||||
if (fi->fib_nh->nh_flags & RTNH_F_OFFLOAD)
|
||||
rtm->rtm_flags |= RTNH_F_OFFLOAD;
|
||||
#ifdef CONFIG_IP_ROUTE_CLASSID
|
||||
if (fi->fib_nh[0].nh_tclassid &&
|
||||
nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
|
||||
|
|
Loading…
Reference in New Issue