net/ipv6: Rename fib6_info struct elements

Change the prefix for fib6_info struct elements from rt6i_ to fib6_.
rt6i_pcpu and rt6i_exception_bucket are left as is given that they
point to rt6_info entries.

Rename only; no functional change intended.

Signed-off-by: David Ahern <dsahern@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
David Ahern 2018-04-18 15:38:59 -07:00 committed by David S. Miller
parent 88078d98d1
commit 93c2fb253d
8 changed files with 298 additions and 298 deletions
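For quick orientation before the per-file diffs, a minimal sketch of what the rename looks like to a caller. The field and flag names match the patch below; the abridged struct and the helper function are hypothetical stand-ins for illustration, not part of this commit.

/* Hypothetical, self-contained sketch (not kernel code): an abridged
 * stand-in for struct fib6_info showing the renamed fields next to their
 * old names. RTF_GATEWAY carries its real value from
 * include/uapi/linux/ipv6_route.h; everything else here is illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

#define RTF_GATEWAY 0x0002

struct fib6_info_sketch {
	uint32_t     fib6_flags;	/* was rt6i_flags */
	uint32_t     fib6_metric;	/* was rt6i_metric */
	unsigned int fib6_nsiblings;	/* was rt6i_nsiblings */
};

/* Before this patch a gateway check read rt->rt6i_flags & RTF_GATEWAY;
 * after it, the same check reads rt->fib6_flags & RTF_GATEWAY.
 */
static bool sketch_rt_is_gateway(const struct fib6_info_sketch *rt)
{
	return rt->fib6_flags & RTF_GATEWAY;
}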

drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c

@ -4705,17 +4705,17 @@ static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
* are trapped to the CPU, so no need to program specific routes
* for them.
*/
if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
return true;
/* Multicast routes aren't supported, so ignore them. Neighbour
* Discovery packets are specifically trapped.
*/
if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
return true;
/* Cloned routes are irrelevant in the forwarding path. */
if (rt->rt6i_flags & RTF_CACHE)
if (rt->fib6_flags & RTF_CACHE)
return true;
return false;
@ -4759,7 +4759,7 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
{
/* RTF_CACHE routes are ignored */
return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
}
static struct fib6_info *
@ -4784,16 +4784,16 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
/* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
* virtual router.
*/
if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
continue;
if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
break;
if (rt->rt6i_metric < nrt->rt6i_metric)
if (rt->fib6_metric < nrt->fib6_metric)
continue;
if (rt->rt6i_metric == nrt->rt6i_metric &&
if (rt->fib6_metric == nrt->fib6_metric &&
mlxsw_sp_fib6_rt_can_mp(rt))
return fib6_entry;
if (rt->rt6i_metric > nrt->rt6i_metric)
if (rt->fib6_metric > nrt->fib6_metric)
break;
}
@ -4899,7 +4899,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
return rt->rt6i_flags & RTF_GATEWAY ||
return rt->fib6_flags & RTF_GATEWAY ||
mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
@ -5092,9 +5092,9 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
* local, which will cause them to be trapped with a lower
* priority than packets that need to be locally received.
*/
if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
else if (rt->rt6i_flags & RTF_REJECT)
else if (rt->fib6_flags & RTF_REJECT)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
@ -5175,18 +5175,18 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
continue;
if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
break;
if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
if (replace && rt->fib6_metric == nrt->fib6_metric) {
if (mlxsw_sp_fib6_rt_can_mp(rt) ==
mlxsw_sp_fib6_rt_can_mp(nrt))
return fib6_entry;
if (mlxsw_sp_fib6_rt_can_mp(nrt))
fallback = fallback ?: fib6_entry;
}
if (rt->rt6i_metric > nrt->rt6i_metric)
if (rt->fib6_metric > nrt->fib6_metric)
return fallback ?: fib6_entry;
}
@ -5215,7 +5215,7 @@ mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
list_for_each_entry(last, &fib_node->entry_list, common.list) {
struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);
if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
break;
fib6_entry = last;
}
@ -5275,22 +5275,22 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib *fib;
struct mlxsw_sp_vr *vr;
vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
if (!vr)
return NULL;
fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
sizeof(rt->rt6i_dst.addr),
rt->rt6i_dst.plen);
fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
sizeof(rt->fib6_dst.addr),
rt->fib6_dst.plen);
if (!fib_node)
return NULL;
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
rt->rt6i_metric == iter_rt->rt6i_metric &&
if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
rt->fib6_metric == iter_rt->fib6_metric &&
mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
return fib6_entry;
}
@ -5325,16 +5325,16 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
if (mlxsw_sp->router->aborted)
return 0;
if (rt->rt6i_src.plen)
if (rt->fib6_src.plen)
return -EINVAL;
if (mlxsw_sp_fib6_rt_should_ignore(rt))
return 0;
fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
&rt->rt6i_dst.addr,
sizeof(rt->rt6i_dst.addr),
rt->rt6i_dst.plen,
fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
&rt->fib6_dst.addr,
sizeof(rt->fib6_dst.addr),
rt->fib6_dst.plen,
MLXSW_SP_L3_PROTO_IPV6);
if (IS_ERR(fib_node))
return PTR_ERR(fib_node);

include/net/ip6_fib.h

@ -134,34 +134,34 @@ struct fib6_nh {
};
struct fib6_info {
struct fib6_table *rt6i_table;
struct fib6_table *fib6_table;
struct fib6_info __rcu *rt6_next;
struct fib6_node __rcu *rt6i_node;
struct fib6_node __rcu *fib6_node;
/* Multipath routes:
* siblings is a list of fib6_info that have the the same metric/weight,
* destination, but not the same gateway. nsiblings is just a cache
* to speed up lookup.
*/
struct list_head rt6i_siblings;
unsigned int rt6i_nsiblings;
struct list_head fib6_siblings;
unsigned int fib6_nsiblings;
atomic_t rt6i_ref;
struct inet6_dev *rt6i_idev;
atomic_t fib6_ref;
struct inet6_dev *fib6_idev;
unsigned long expires;
struct dst_metrics *fib6_metrics;
#define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1]
struct rt6key rt6i_dst;
u32 rt6i_flags;
struct rt6key rt6i_src;
struct rt6key rt6i_prefsrc;
struct rt6key fib6_dst;
u32 fib6_flags;
struct rt6key fib6_src;
struct rt6key fib6_prefsrc;
struct rt6_info * __percpu *rt6i_pcpu;
struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
u32 rt6i_metric;
u8 rt6i_protocol;
u32 fib6_metric;
u8 fib6_protocol;
u8 fib6_type;
u8 exception_bucket_flushed:1,
should_flush:1,
@ -206,7 +206,7 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
static inline void fib6_clean_expires(struct fib6_info *f6i)
{
f6i->rt6i_flags &= ~RTF_EXPIRES;
f6i->fib6_flags &= ~RTF_EXPIRES;
f6i->expires = 0;
}
@ -214,12 +214,12 @@ static inline void fib6_set_expires(struct fib6_info *f6i,
unsigned long expires)
{
f6i->expires = expires;
f6i->rt6i_flags |= RTF_EXPIRES;
f6i->fib6_flags |= RTF_EXPIRES;
}
static inline bool fib6_check_expired(const struct fib6_info *f6i)
{
if (f6i->rt6i_flags & RTF_EXPIRES)
if (f6i->fib6_flags & RTF_EXPIRES)
return time_after(jiffies, f6i->expires);
return false;
}
@ -250,14 +250,14 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
* Return true if we can get cookie safely
* Return false if not
*/
static inline bool rt6_get_cookie_safe(const struct fib6_info *rt,
static inline bool rt6_get_cookie_safe(const struct fib6_info *f6i,
u32 *cookie)
{
struct fib6_node *fn;
bool status = false;
rcu_read_lock();
fn = rcu_dereference(rt->rt6i_node);
fn = rcu_dereference(f6i->fib6_node);
if (fn) {
*cookie = fn->fn_sernum;
@ -295,12 +295,12 @@ void fib6_info_destroy(struct fib6_info *f6i);
static inline void fib6_info_hold(struct fib6_info *f6i)
{
atomic_inc(&f6i->rt6i_ref);
atomic_inc(&f6i->fib6_ref);
}
static inline void fib6_info_release(struct fib6_info *f6i)
{
if (f6i && atomic_dec_and_test(&f6i->rt6i_ref))
if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
fib6_info_destroy(f6i);
}

include/net/ip6_route.h

@ -66,9 +66,9 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}
static inline bool rt6_qualify_for_ecmp(const struct fib6_info *rt)
static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
{
return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
RTF_GATEWAY;
}
@ -102,23 +102,23 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg);
int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
int ip6_ins_rt(struct net *net, struct fib6_info *rt);
int ip6_del_rt(struct net *net, struct fib6_info *rt);
int ip6_ins_rt(struct net *net, struct fib6_info *f6i);
int ip6_del_rt(struct net *net, struct fib6_info *f6i);
void rt6_flush_exceptions(struct fib6_info *rt);
void rt6_age_exceptions(struct fib6_info *rt, struct fib6_gc_args *gc_args,
void rt6_flush_exceptions(struct fib6_info *f6i);
void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args,
unsigned long now);
static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *rt,
static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i,
const struct in6_addr *daddr,
unsigned int prefs,
struct in6_addr *saddr)
{
struct inet6_dev *idev = rt ? rt->rt6i_idev : NULL;
struct inet6_dev *idev = f6i ? f6i->fib6_idev : NULL;
int err = 0;
if (rt && rt->rt6i_prefsrc.plen)
*saddr = rt->rt6i_prefsrc.addr;
if (f6i && f6i->fib6_prefsrc.plen)
*saddr = f6i->fib6_prefsrc.addr;
else
err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
daddr, prefs, saddr);
@ -176,14 +176,14 @@ struct rt6_rtnl_dump_arg {
struct net *net;
};
int rt6_dump_route(struct fib6_info *rt, void *p_arg);
int rt6_dump_route(struct fib6_info *f6i, void *p_arg);
void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
void rt6_sync_up(struct net_device *dev, unsigned int nh_flags);
void rt6_disable_ip(struct net_device *dev, unsigned long event);
void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
void rt6_multipath_rebalance(struct fib6_info *rt);
void rt6_multipath_rebalance(struct fib6_info *f6i);
void rt6_uncached_list_add(struct rt6_info *rt);
void rt6_uncached_list_del(struct rt6_info *rt);
@ -274,7 +274,7 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *b)
{
return a->fib6_nh.nh_dev == b->fib6_nh.nh_dev &&
a->rt6i_idev == b->rt6i_idev &&
a->fib6_idev == b->fib6_idev &&
ipv6_addr_equal(&a->fib6_nh.nh_gw, &b->fib6_nh.nh_gw) &&
!lwtunnel_cmp_encap(a->fib6_nh.nh_lwtstate, b->fib6_nh.nh_lwtstate);
}

net/ipv6/addrconf.c

@ -1178,19 +1178,19 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
static void
cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
{
struct fib6_info *rt;
struct fib6_info *f6i;
rt = addrconf_get_prefix_route(&ifp->addr,
f6i = addrconf_get_prefix_route(&ifp->addr,
ifp->prefix_len,
ifp->idev->dev,
0, RTF_GATEWAY | RTF_DEFAULT);
if (rt) {
if (f6i) {
if (del_rt)
ip6_del_rt(dev_net(ifp->idev->dev), rt);
ip6_del_rt(dev_net(ifp->idev->dev), f6i);
else {
if (!(rt->rt6i_flags & RTF_EXPIRES))
fib6_set_expires(rt, expires);
fib6_info_release(rt);
if (!(f6i->fib6_flags & RTF_EXPIRES))
fib6_set_expires(f6i, expires);
fib6_info_release(f6i);
}
}
}
@ -2370,9 +2370,9 @@ static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
for_each_fib6_node_rt_rcu(fn) {
if (rt->fib6_nh.nh_dev->ifindex != dev->ifindex)
continue;
if ((rt->rt6i_flags & flags) != flags)
if ((rt->fib6_flags & flags) != flags)
continue;
if ((rt->rt6i_flags & noflags) != 0)
if ((rt->fib6_flags & noflags) != 0)
continue;
fib6_info_hold(rt);
break;
@ -3341,11 +3341,11 @@ static int fixup_permanent_addr(struct net *net,
struct inet6_dev *idev,
struct inet6_ifaddr *ifp)
{
/* !rt6i_node means the host route was removed from the
/* !fib6_node means the host route was removed from the
* FIB, for example, if 'lo' device is taken down. In that
* case regenerate the host route.
*/
if (!ifp->rt || !ifp->rt->rt6i_node) {
if (!ifp->rt || !ifp->rt->fib6_node) {
struct fib6_info *rt, *prev;
rt = addrconf_dst_alloc(net, idev, &ifp->addr, false,
@ -5612,7 +5612,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
* our DAD process, so we don't need
* to do it again
*/
if (!rcu_access_pointer(ifp->rt->rt6i_node))
if (!rcu_access_pointer(ifp->rt->fib6_node))
ip6_ins_rt(net, ifp->rt);
if (ifp->idev->cnf.forwarding)
addrconf_join_anycast(ifp);

net/ipv6/anycast.c

@ -218,10 +218,10 @@ static void aca_put(struct ifacaddr6 *ac)
}
}
static struct ifacaddr6 *aca_alloc(struct fib6_info *rt,
static struct ifacaddr6 *aca_alloc(struct fib6_info *f6i,
const struct in6_addr *addr)
{
struct inet6_dev *idev = rt->rt6i_idev;
struct inet6_dev *idev = f6i->fib6_idev;
struct ifacaddr6 *aca;
aca = kzalloc(sizeof(*aca), GFP_ATOMIC);
@ -231,8 +231,8 @@ static struct ifacaddr6 *aca_alloc(struct fib6_info *rt,
aca->aca_addr = *addr;
in6_dev_hold(idev);
aca->aca_idev = idev;
fib6_info_hold(rt);
aca->aca_rt = rt;
fib6_info_hold(f6i);
aca->aca_rt = f6i;
aca->aca_users = 1;
/* aca_tstamp should be updated upon changes */
aca->aca_cstamp = aca->aca_tstamp = jiffies;

net/ipv6/ip6_fib.c

@ -105,12 +105,12 @@ enum {
FIB6_NO_SERNUM_CHANGE = 0,
};
void fib6_update_sernum(struct net *net, struct fib6_info *rt)
void fib6_update_sernum(struct net *net, struct fib6_info *f6i)
{
struct fib6_node *fn;
fn = rcu_dereference_protected(rt->rt6i_node,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
fn = rcu_dereference_protected(f6i->fib6_node,
lockdep_is_held(&f6i->fib6_table->tb6_lock));
if (fn)
fn->fn_sernum = fib6_new_sernum(net);
}
@ -159,10 +159,10 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags)
return NULL;
}
INIT_LIST_HEAD(&f6i->rt6i_siblings);
INIT_LIST_HEAD(&f6i->fib6_siblings);
f6i->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
atomic_inc(&f6i->rt6i_ref);
atomic_inc(&f6i->fib6_ref);
return f6i;
}
@ -172,7 +172,7 @@ void fib6_info_destroy(struct fib6_info *f6i)
struct rt6_exception_bucket *bucket;
struct dst_metrics *m;
WARN_ON(f6i->rt6i_node);
WARN_ON(f6i->fib6_node);
bucket = rcu_dereference_protected(f6i->rt6i_exception_bucket, 1);
if (bucket) {
@ -197,8 +197,8 @@ void fib6_info_destroy(struct fib6_info *f6i)
}
}
if (f6i->rt6i_idev)
in6_dev_put(f6i->rt6i_idev);
if (f6i->fib6_idev)
in6_dev_put(f6i->fib6_idev);
if (f6i->fib6_nh.nh_dev)
dev_put(f6i->fib6_nh.nh_dev);
@ -401,7 +401,7 @@ static int call_fib6_entry_notifiers(struct net *net,
.rt = rt,
};
rt->rt6i_table->fib_seq++;
rt->fib6_table->fib_seq++;
return call_fib6_notifiers(net, event_type, &info.info);
}
@ -483,10 +483,10 @@ static int fib6_dump_node(struct fib6_walker *w)
* last sibling of this route (no need to dump the
* sibling routes again)
*/
if (rt->rt6i_nsiblings)
rt = list_last_entry(&rt->rt6i_siblings,
if (rt->fib6_nsiblings)
rt = list_last_entry(&rt->fib6_siblings,
struct fib6_info,
rt6i_siblings);
fib6_siblings);
}
w->leaf = NULL;
return 0;
@ -810,7 +810,7 @@ insert_above:
RCU_INIT_POINTER(in->parent, pn);
in->leaf = fn->leaf;
atomic_inc(&rcu_dereference_protected(in->leaf,
lockdep_is_held(&table->tb6_lock))->rt6i_ref);
lockdep_is_held(&table->tb6_lock))->fib6_ref);
/* update parent pointer */
if (dir)
@ -865,9 +865,9 @@ insert_above:
static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
struct net *net)
{
struct fib6_table *table = rt->rt6i_table;
struct fib6_table *table = rt->fib6_table;
if (atomic_read(&rt->rt6i_ref) != 1) {
if (atomic_read(&rt->fib6_ref) != 1) {
/* This route is used as dummy address holder in some split
* nodes. It is not leaked, but it still holds other resources,
* which must be released in time. So, scan ascendant nodes
@ -880,7 +880,7 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
struct fib6_info *new_leaf;
if (!(fn->fn_flags & RTN_RTINFO) && leaf == rt) {
new_leaf = fib6_find_prefix(net, table, fn);
atomic_inc(&new_leaf->rt6i_ref);
atomic_inc(&new_leaf->fib6_ref);
rcu_assign_pointer(fn->leaf, new_leaf);
fib6_info_release(rt);
@ -919,7 +919,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
struct netlink_ext_ack *extack)
{
struct fib6_info *leaf = rcu_dereference_protected(fn->leaf,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
lockdep_is_held(&rt->fib6_table->tb6_lock));
struct fib6_info *iter = NULL;
struct fib6_info __rcu **ins;
struct fib6_info __rcu **fallback_ins = NULL;
@ -939,12 +939,12 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
for (iter = leaf; iter;
iter = rcu_dereference_protected(iter->rt6_next,
lockdep_is_held(&rt->rt6i_table->tb6_lock))) {
lockdep_is_held(&rt->fib6_table->tb6_lock))) {
/*
* Search for duplicates
*/
if (iter->rt6i_metric == rt->rt6i_metric) {
if (iter->fib6_metric == rt->fib6_metric) {
/*
* Same priority level
*/
@ -964,11 +964,11 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
}
if (rt6_duplicate_nexthop(iter, rt)) {
if (rt->rt6i_nsiblings)
rt->rt6i_nsiblings = 0;
if (!(iter->rt6i_flags & RTF_EXPIRES))
if (rt->fib6_nsiblings)
rt->fib6_nsiblings = 0;
if (!(iter->fib6_flags & RTF_EXPIRES))
return -EEXIST;
if (!(rt->rt6i_flags & RTF_EXPIRES))
if (!(rt->fib6_flags & RTF_EXPIRES))
fib6_clean_expires(iter);
else
fib6_set_expires(iter, rt->expires);
@ -988,10 +988,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
*/
if (rt_can_ecmp &&
rt6_qualify_for_ecmp(iter))
rt->rt6i_nsiblings++;
rt->fib6_nsiblings++;
}
if (iter->rt6i_metric > rt->rt6i_metric)
if (iter->fib6_metric > rt->fib6_metric)
break;
next_iter:
@ -1002,7 +1002,7 @@ next_iter:
/* No ECMP-able route found, replace first non-ECMP one */
ins = fallback_ins;
iter = rcu_dereference_protected(*ins,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
lockdep_is_held(&rt->fib6_table->tb6_lock));
found++;
}
@ -1011,34 +1011,34 @@ next_iter:
fn->rr_ptr = NULL;
/* Link this route to others same route. */
if (rt->rt6i_nsiblings) {
unsigned int rt6i_nsiblings;
if (rt->fib6_nsiblings) {
unsigned int fib6_nsiblings;
struct fib6_info *sibling, *temp_sibling;
/* Find the first route that have the same metric */
sibling = leaf;
while (sibling) {
if (sibling->rt6i_metric == rt->rt6i_metric &&
if (sibling->fib6_metric == rt->fib6_metric &&
rt6_qualify_for_ecmp(sibling)) {
list_add_tail(&rt->rt6i_siblings,
&sibling->rt6i_siblings);
list_add_tail(&rt->fib6_siblings,
&sibling->fib6_siblings);
break;
}
sibling = rcu_dereference_protected(sibling->rt6_next,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
lockdep_is_held(&rt->fib6_table->tb6_lock));
}
/* For each sibling in the list, increment the counter of
* siblings. BUG() if counters does not match, list of siblings
* is broken!
*/
rt6i_nsiblings = 0;
fib6_nsiblings = 0;
list_for_each_entry_safe(sibling, temp_sibling,
&rt->rt6i_siblings, rt6i_siblings) {
sibling->rt6i_nsiblings++;
BUG_ON(sibling->rt6i_nsiblings != rt->rt6i_nsiblings);
rt6i_nsiblings++;
&rt->fib6_siblings, fib6_siblings) {
sibling->fib6_nsiblings++;
BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings);
fib6_nsiblings++;
}
BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings);
BUG_ON(fib6_nsiblings != rt->fib6_nsiblings);
rt6_multipath_rebalance(temp_sibling);
}
@ -1059,8 +1059,8 @@ add:
return err;
rcu_assign_pointer(rt->rt6_next, iter);
atomic_inc(&rt->rt6i_ref);
rcu_assign_pointer(rt->rt6i_node, fn);
atomic_inc(&rt->fib6_ref);
rcu_assign_pointer(rt->fib6_node, fn);
rcu_assign_pointer(*ins, rt);
if (!info->skip_notify)
inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
@ -1087,8 +1087,8 @@ add:
if (err)
return err;
atomic_inc(&rt->rt6i_ref);
rcu_assign_pointer(rt->rt6i_node, fn);
atomic_inc(&rt->fib6_ref);
rcu_assign_pointer(rt->fib6_node, fn);
rt->rt6_next = iter->rt6_next;
rcu_assign_pointer(*ins, rt);
if (!info->skip_notify)
@ -1097,8 +1097,8 @@ add:
info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
fn->fn_flags |= RTN_RTINFO;
}
nsiblings = iter->rt6i_nsiblings;
iter->rt6i_node = NULL;
nsiblings = iter->fib6_nsiblings;
iter->fib6_node = NULL;
fib6_purge_rt(iter, fn, info->nl_net);
if (rcu_access_pointer(fn->rr_ptr) == iter)
fn->rr_ptr = NULL;
@ -1108,13 +1108,13 @@ add:
/* Replacing an ECMP route, remove all siblings */
ins = &rt->rt6_next;
iter = rcu_dereference_protected(*ins,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
lockdep_is_held(&rt->fib6_table->tb6_lock));
while (iter) {
if (iter->rt6i_metric > rt->rt6i_metric)
if (iter->fib6_metric > rt->fib6_metric)
break;
if (rt6_qualify_for_ecmp(iter)) {
*ins = iter->rt6_next;
iter->rt6i_node = NULL;
iter->fib6_node = NULL;
fib6_purge_rt(iter, fn, info->nl_net);
if (rcu_access_pointer(fn->rr_ptr) == iter)
fn->rr_ptr = NULL;
@ -1125,7 +1125,7 @@ add:
ins = &iter->rt6_next;
}
iter = rcu_dereference_protected(*ins,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
lockdep_is_held(&rt->fib6_table->tb6_lock));
}
WARN_ON(nsiblings != 0);
}
@ -1137,7 +1137,7 @@ add:
static void fib6_start_gc(struct net *net, struct fib6_info *rt)
{
if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
(rt->rt6i_flags & RTF_EXPIRES))
(rt->fib6_flags & RTF_EXPIRES))
mod_timer(&net->ipv6.ip6_fib_timer,
jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
}
@ -1152,15 +1152,15 @@ void fib6_force_start_gc(struct net *net)
static void __fib6_update_sernum_upto_root(struct fib6_info *rt,
int sernum)
{
struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node,
lockdep_is_held(&rt->fib6_table->tb6_lock));
/* paired with smp_rmb() in rt6_get_cookie_safe() */
smp_wmb();
while (fn) {
fn->fn_sernum = sernum;
fn = rcu_dereference_protected(fn->parent,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
lockdep_is_held(&rt->fib6_table->tb6_lock));
}
}
@ -1179,7 +1179,7 @@ void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt)
int fib6_add(struct fib6_node *root, struct fib6_info *rt,
struct nl_info *info, struct netlink_ext_ack *extack)
{
struct fib6_table *table = rt->rt6i_table;
struct fib6_table *table = rt->fib6_table;
struct fib6_node *fn, *pn = NULL;
int err = -ENOMEM;
int allow_create = 1;
@ -1196,8 +1196,8 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
fn = fib6_add_1(info->nl_net, table, root,
&rt->rt6i_dst.addr, rt->rt6i_dst.plen,
offsetof(struct fib6_info, rt6i_dst), allow_create,
&rt->fib6_dst.addr, rt->fib6_dst.plen,
offsetof(struct fib6_info, fib6_dst), allow_create,
replace_required, extack);
if (IS_ERR(fn)) {
err = PTR_ERR(fn);
@ -1208,7 +1208,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
pn = fn;
#ifdef CONFIG_IPV6_SUBTREES
if (rt->rt6i_src.plen) {
if (rt->fib6_src.plen) {
struct fib6_node *sn;
if (!rcu_access_pointer(fn->subtree)) {
@ -1229,7 +1229,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
if (!sfn)
goto failure;
atomic_inc(&info->nl_net->ipv6.fib6_null_entry->rt6i_ref);
atomic_inc(&info->nl_net->ipv6.fib6_null_entry->fib6_ref);
rcu_assign_pointer(sfn->leaf,
info->nl_net->ipv6.fib6_null_entry);
sfn->fn_flags = RTN_ROOT;
@ -1237,8 +1237,8 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
/* Now add the first leaf node to new subtree */
sn = fib6_add_1(info->nl_net, table, sfn,
&rt->rt6i_src.addr, rt->rt6i_src.plen,
offsetof(struct fib6_info, rt6i_src),
&rt->fib6_src.addr, rt->fib6_src.plen,
offsetof(struct fib6_info, fib6_src),
allow_create, replace_required, extack);
if (IS_ERR(sn)) {
@ -1256,8 +1256,8 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
rcu_assign_pointer(fn->subtree, sfn);
} else {
sn = fib6_add_1(info->nl_net, table, FIB6_SUBTREE(fn),
&rt->rt6i_src.addr, rt->rt6i_src.plen,
offsetof(struct fib6_info, rt6i_src),
&rt->fib6_src.addr, rt->fib6_src.plen,
offsetof(struct fib6_info, fib6_src),
allow_create, replace_required, extack);
if (IS_ERR(sn)) {
@ -1272,7 +1272,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
rcu_assign_pointer(fn->leaf,
info->nl_net->ipv6.fib6_null_entry);
} else {
atomic_inc(&rt->rt6i_ref);
atomic_inc(&rt->fib6_ref);
rcu_assign_pointer(fn->leaf, rt);
}
}
@ -1421,12 +1421,12 @@ struct fib6_node *fib6_lookup(struct fib6_node *root, const struct in6_addr *dad
struct fib6_node *fn;
struct lookup_args args[] = {
{
.offset = offsetof(struct fib6_info, rt6i_dst),
.offset = offsetof(struct fib6_info, fib6_dst),
.addr = daddr,
},
#ifdef CONFIG_IPV6_SUBTREES
{
.offset = offsetof(struct fib6_info, rt6i_src),
.offset = offsetof(struct fib6_info, fib6_src),
.addr = saddr,
},
#endif
@ -1511,7 +1511,7 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
struct fib6_node *fn;
fn = fib6_locate_1(root, daddr, dst_len,
offsetof(struct fib6_info, rt6i_dst),
offsetof(struct fib6_info, fib6_dst),
exact_match);
#ifdef CONFIG_IPV6_SUBTREES
@ -1522,7 +1522,7 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
if (subtree) {
fn = fib6_locate_1(subtree, saddr, src_len,
offsetof(struct fib6_info, rt6i_src),
offsetof(struct fib6_info, fib6_src),
exact_match);
}
}
@ -1706,7 +1706,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
/* Unlink it */
*rtp = rt->rt6_next;
rt->rt6i_node = NULL;
rt->fib6_node = NULL;
net->ipv6.rt6_stats->fib_rt_entries--;
net->ipv6.rt6_stats->fib_discarded_routes++;
@ -1718,14 +1718,14 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
fn->rr_ptr = NULL;
/* Remove this entry from other siblings */
if (rt->rt6i_nsiblings) {
if (rt->fib6_nsiblings) {
struct fib6_info *sibling, *next_sibling;
list_for_each_entry_safe(sibling, next_sibling,
&rt->rt6i_siblings, rt6i_siblings)
sibling->rt6i_nsiblings--;
rt->rt6i_nsiblings = 0;
list_del_init(&rt->rt6i_siblings);
&rt->fib6_siblings, fib6_siblings)
sibling->fib6_nsiblings--;
rt->fib6_nsiblings = 0;
list_del_init(&rt->fib6_siblings);
rt6_multipath_rebalance(next_sibling);
}
@ -1765,9 +1765,9 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
/* Need to own table->tb6_lock */
int fib6_del(struct fib6_info *rt, struct nl_info *info)
{
struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
struct fib6_table *table = rt->rt6i_table;
struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node,
lockdep_is_held(&rt->fib6_table->tb6_lock));
struct fib6_table *table = rt->fib6_table;
struct net *net = info->nl_net;
struct fib6_info __rcu **rtp;
struct fib6_info __rcu **rtp_next;
@ -1951,17 +1951,17 @@ static int fib6_clean_node(struct fib6_walker *w)
#if RT6_DEBUG >= 2
pr_debug("%s: del failed: rt=%p@%p err=%d\n",
__func__, rt,
rcu_access_pointer(rt->rt6i_node),
rcu_access_pointer(rt->fib6_node),
res);
#endif
continue;
}
return 0;
} else if (res == -2) {
if (WARN_ON(!rt->rt6i_nsiblings))
if (WARN_ON(!rt->fib6_nsiblings))
continue;
rt = list_last_entry(&rt->rt6i_siblings,
struct fib6_info, rt6i_siblings);
rt = list_last_entry(&rt->fib6_siblings,
struct fib6_info, fib6_siblings);
continue;
}
WARN_ON(res != 0);
@ -2045,7 +2045,7 @@ static int fib6_age(struct fib6_info *rt, void *arg)
* Routes are expired even if they are in use.
*/
if (rt->rt6i_flags & RTF_EXPIRES && rt->expires) {
if (rt->fib6_flags & RTF_EXPIRES && rt->expires) {
if (time_after(now, rt->expires)) {
RT6_TRACE("expiring %p\n", rt);
return -1;
@ -2243,22 +2243,22 @@ static int ipv6_route_seq_show(struct seq_file *seq, void *v)
struct ipv6_route_iter *iter = seq->private;
const struct net_device *dev;
seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen);
#ifdef CONFIG_IPV6_SUBTREES
seq_printf(seq, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
seq_printf(seq, "%pi6 %02x ", &rt->fib6_src.addr, rt->fib6_src.plen);
#else
seq_puts(seq, "00000000000000000000000000000000 00 ");
#endif
if (rt->rt6i_flags & RTF_GATEWAY)
if (rt->fib6_flags & RTF_GATEWAY)
seq_printf(seq, "%pi6", &rt->fib6_nh.nh_gw);
else
seq_puts(seq, "00000000000000000000000000000000");
dev = rt->fib6_nh.nh_dev;
seq_printf(seq, " %08x %08x %08x %08x %8s\n",
rt->rt6i_metric, atomic_read(&rt->rt6i_ref), 0,
rt->rt6i_flags, dev ? dev->name : "");
rt->fib6_metric, atomic_read(&rt->fib6_ref), 0,
rt->fib6_flags, dev ? dev->name : "");
iter->w.leaf = NULL;
return 0;
}

net/ipv6/ndisc.c

@ -1318,7 +1318,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
}
neigh->flags |= NTF_ROUTER;
} else if (rt) {
rt->rt6i_flags = (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
rt->fib6_flags = (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
}
if (rt)

net/ipv6/route.c

@ -284,10 +284,10 @@ static const u32 ip6_template_metrics[RTAX_MAX] = {
};
static const struct fib6_info fib6_null_entry_template = {
.rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
.rt6i_protocol = RTPROT_KERNEL,
.rt6i_metric = ~(u32)0,
.rt6i_ref = ATOMIC_INIT(1),
.fib6_flags = (RTF_REJECT | RTF_NONEXTHOP),
.fib6_protocol = RTPROT_KERNEL,
.fib6_metric = ~(u32)0,
.fib6_ref = ATOMIC_INIT(1),
.fib6_type = RTN_UNREACHABLE,
.fib6_metrics = (struct dst_metrics *)&dst_default_metrics,
};
@ -429,8 +429,8 @@ static struct fib6_info *rt6_multipath_select(const struct net *net,
if (fl6->mp_hash <= atomic_read(&match->fib6_nh.nh_upper_bound))
return match;
list_for_each_entry_safe(sibling, next_sibling, &match->rt6i_siblings,
rt6i_siblings) {
list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
fib6_siblings) {
int nh_upper_bound;
nh_upper_bound = atomic_read(&sibling->fib6_nh.nh_upper_bound);
@ -472,12 +472,12 @@ static inline struct fib6_info *rt6_device_match(struct net *net,
if (dev->ifindex == oif)
return sprt;
if (dev->flags & IFF_LOOPBACK) {
if (!sprt->rt6i_idev ||
sprt->rt6i_idev->dev->ifindex != oif) {
if (!sprt->fib6_idev ||
sprt->fib6_idev->dev->ifindex != oif) {
if (flags & RT6_LOOKUP_F_IFACE)
continue;
if (local &&
local->rt6i_idev->dev->ifindex == oif)
local->fib6_idev->dev->ifindex == oif)
continue;
}
local = sprt;
@ -534,7 +534,7 @@ static void rt6_probe(struct fib6_info *rt)
* Router Reachability Probe MUST be rate-limited
* to no more than one per minute.
*/
if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
if (!rt || !(rt->fib6_flags & RTF_GATEWAY))
return;
nh_gw = &rt->fib6_nh.nh_gw;
@ -550,7 +550,7 @@ static void rt6_probe(struct fib6_info *rt)
if (!(neigh->nud_state & NUD_VALID) &&
time_after(jiffies,
neigh->updated +
rt->rt6i_idev->cnf.rtr_probe_interval)) {
rt->fib6_idev->cnf.rtr_probe_interval)) {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (work)
__neigh_set_probe_once(neigh);
@ -587,7 +587,7 @@ static inline int rt6_check_dev(struct fib6_info *rt, int oif)
if (!oif || dev->ifindex == oif)
return 2;
if ((dev->flags & IFF_LOOPBACK) &&
rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
rt->fib6_idev && rt->fib6_idev->dev->ifindex == oif)
return 1;
return 0;
}
@ -597,8 +597,8 @@ static inline enum rt6_nud_state rt6_check_neigh(struct fib6_info *rt)
enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
struct neighbour *neigh;
if (rt->rt6i_flags & RTF_NONEXTHOP ||
!(rt->rt6i_flags & RTF_GATEWAY))
if (rt->fib6_flags & RTF_NONEXTHOP ||
!(rt->fib6_flags & RTF_GATEWAY))
return RT6_NUD_SUCCEED;
rcu_read_lock_bh();
@ -632,7 +632,7 @@ static int rt6_score_route(struct fib6_info *rt, int oif, int strict)
if (!m && (strict & RT6_LOOKUP_F_IFACE))
return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->fib6_flags)) << 2;
#endif
if (strict & RT6_LOOKUP_F_REACHABLE) {
int n = rt6_check_neigh(rt);
@ -648,7 +648,7 @@ static struct fib6_info *find_match(struct fib6_info *rt, int oif, int strict,
{
int m;
bool match_do_rr = false;
struct inet6_dev *idev = rt->rt6i_idev;
struct inet6_dev *idev = rt->fib6_idev;
if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
goto out;
@ -694,7 +694,7 @@ static struct fib6_info *find_rr_leaf(struct fib6_node *fn,
match = NULL;
cont = NULL;
for (rt = rr_head; rt; rt = rcu_dereference(rt->rt6_next)) {
if (rt->rt6i_metric != metric) {
if (rt->fib6_metric != metric) {
cont = rt;
break;
}
@ -704,7 +704,7 @@ static struct fib6_info *find_rr_leaf(struct fib6_node *fn,
for (rt = leaf; rt && rt != rr_head;
rt = rcu_dereference(rt->rt6_next)) {
if (rt->rt6i_metric != metric) {
if (rt->fib6_metric != metric) {
cont = rt;
break;
}
@ -741,30 +741,30 @@ static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn,
* (This might happen if all routes under fn are deleted from
* the tree and fib6_repair_tree() is called on the node.)
*/
key_plen = rt0->rt6i_dst.plen;
key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
if (rt0->rt6i_src.plen)
key_plen = rt0->rt6i_src.plen;
if (rt0->fib6_src.plen)
key_plen = rt0->fib6_src.plen;
#endif
if (fn->fn_bit != key_plen)
return net->ipv6.fib6_null_entry;
match = find_rr_leaf(fn, leaf, rt0, rt0->rt6i_metric, oif, strict,
match = find_rr_leaf(fn, leaf, rt0, rt0->fib6_metric, oif, strict,
&do_rr);
if (do_rr) {
struct fib6_info *next = rcu_dereference(rt0->rt6_next);
/* no entries matched; do round-robin */
if (!next || next->rt6i_metric != rt0->rt6i_metric)
if (!next || next->fib6_metric != rt0->fib6_metric)
next = leaf;
if (next != rt0) {
spin_lock_bh(&leaf->rt6i_table->tb6_lock);
spin_lock_bh(&leaf->fib6_table->tb6_lock);
/* make sure next is not being deleted from the tree */
if (next->rt6i_node)
if (next->fib6_node)
rcu_assign_pointer(fn->rr_ptr, next);
spin_unlock_bh(&leaf->rt6i_table->tb6_lock);
spin_unlock_bh(&leaf->fib6_table->tb6_lock);
}
}
@ -773,7 +773,7 @@ static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn,
static bool rt6_is_gw_or_nonexthop(const struct fib6_info *rt)
{
return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
return (rt->fib6_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
}
#ifdef CONFIG_IPV6_ROUTE_INFO
@ -837,8 +837,8 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
dev, pref);
else if (rt)
rt->rt6i_flags = RTF_ROUTEINFO |
(rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
rt->fib6_flags = RTF_ROUTEINFO |
(rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
if (rt) {
if (!addrconf_finite_timeout(lifetime))
@ -861,13 +861,13 @@ static struct net_device *ip6_rt_get_dev_rcu(struct fib6_info *rt)
{
struct net_device *dev = rt->fib6_nh.nh_dev;
if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) {
if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
/* for copies of local routes, dst->dev needs to be the
* device if it is a master device, the master device if
* device is enslaved, and the loopback as the default
*/
if (netif_is_l3_slave(dev) &&
!rt6_need_strict(&rt->rt6i_dst.addr))
!rt6_need_strict(&rt->fib6_dst.addr))
dev = l3mdev_master_dev_rcu(dev);
else if (!netif_is_l3_master(dev))
dev = dev_net(dev)->loopback_dev;
@ -939,7 +939,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
{
rt->dst.flags |= fib6_info_dst_flags(ort);
if (ort->rt6i_flags & RTF_REJECT) {
if (ort->fib6_flags & RTF_REJECT) {
ip6_rt_init_dst_reject(rt, ort);
return;
}
@ -949,7 +949,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
if (ort->fib6_type == RTN_LOCAL) {
rt->dst.input = ip6_input;
} else if (ipv6_addr_type(&ort->rt6i_dst.addr) & IPV6_ADDR_MULTICAST) {
} else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
rt->dst.input = ip6_mc_input;
} else {
rt->dst.input = ip6_forward;
@ -979,17 +979,17 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
{
ip6_rt_init_dst(rt, ort);
rt->rt6i_dst = ort->rt6i_dst;
rt->rt6i_idev = ort->rt6i_idev;
rt->rt6i_dst = ort->fib6_dst;
rt->rt6i_idev = ort->fib6_idev;
if (rt->rt6i_idev)
in6_dev_hold(rt->rt6i_idev);
rt->rt6i_gateway = ort->fib6_nh.nh_gw;
rt->rt6i_flags = ort->rt6i_flags;
rt->rt6i_flags = ort->fib6_flags;
rt6_set_from(rt, ort);
#ifdef CONFIG_IPV6_SUBTREES
rt->rt6i_src = ort->rt6i_src;
rt->rt6i_src = ort->fib6_src;
#endif
rt->rt6i_prefsrc = ort->rt6i_prefsrc;
rt->rt6i_prefsrc = ort->fib6_prefsrc;
rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
}
@ -1064,7 +1064,7 @@ restart:
} else {
f6i = rt6_device_match(net, f6i, &fl6->saddr,
fl6->flowi6_oif, flags);
if (f6i->rt6i_nsiblings && fl6->flowi6_oif == 0)
if (f6i->fib6_nsiblings && fl6->flowi6_oif == 0)
f6i = rt6_multipath_select(net, f6i, fl6,
fl6->flowi6_oif, skb, flags);
}
@ -1142,7 +1142,7 @@ static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
int err;
struct fib6_table *table;
table = rt->rt6i_table;
table = rt->fib6_table;
spin_lock_bh(&table->tb6_lock);
err = fib6_add(&table->tb6_root, rt, info, extack);
spin_unlock_bh(&table->tb6_lock);
@ -1182,8 +1182,8 @@ static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
rt->rt6i_dst.plen = 128;
if (!rt6_is_gw_or_nonexthop(ort)) {
if (ort->rt6i_dst.plen != 128 &&
ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
if (ort->fib6_dst.plen != 128 &&
ipv6_addr_equal(&ort->fib6_dst.addr, daddr))
rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
if (rt->rt6i_src.plen && saddr) {
@ -1375,7 +1375,7 @@ static unsigned int fib6_mtu(const struct fib6_info *rt)
{
unsigned int mtu;
mtu = rt->fib6_pmtu ? : rt->rt6i_idev->cnf.mtu6;
mtu = rt->fib6_pmtu ? : rt->fib6_idev->cnf.mtu6;
mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
return mtu - lwtunnel_headroom(rt->fib6_nh.nh_lwtstate, mtu);
@ -1416,14 +1416,14 @@ static int rt6_insert_exception(struct rt6_info *nrt,
* Otherwise, the exception table is indexed by
* a hash of only rt6i_dst.
*/
if (ort->rt6i_src.plen)
if (ort->fib6_src.plen)
src_key = &nrt->rt6i_src.addr;
#endif
/* Update rt6i_prefsrc as it could be changed
* in rt6_remove_prefsrc()
*/
nrt->rt6i_prefsrc = ort->rt6i_prefsrc;
nrt->rt6i_prefsrc = ort->fib6_prefsrc;
/* rt6_mtu_change() might lower mtu on ort.
* Only insert this exception route if its mtu
* is less than ort's mtu value.
@ -1457,9 +1457,9 @@ out:
/* Update fn->fn_sernum to invalidate all cached dst */
if (!err) {
spin_lock_bh(&ort->rt6i_table->tb6_lock);
spin_lock_bh(&ort->fib6_table->tb6_lock);
fib6_update_sernum(net, ort);
spin_unlock_bh(&ort->rt6i_table->tb6_lock);
spin_unlock_bh(&ort->fib6_table->tb6_lock);
fib6_force_start_gc(net);
}
@ -1514,7 +1514,7 @@ static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
* Otherwise, the exception table is indexed by
* a hash of only rt6i_dst.
*/
if (rt->rt6i_src.plen)
if (rt->fib6_src.plen)
src_key = saddr;
#endif
rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
@ -1551,7 +1551,7 @@ static int rt6_remove_exception_rt(struct rt6_info *rt)
* Otherwise, the exception table is indexed by
* a hash of only rt6i_dst.
*/
if (from->rt6i_src.plen)
if (from->fib6_src.plen)
src_key = &rt->rt6i_src.addr;
#endif
rt6_ex = __rt6_find_exception_spinlock(&bucket,
@ -1592,7 +1592,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
* Otherwise, the exception table is indexed by
* a hash of only rt6i_dst.
*/
if (from->rt6i_src.plen)
if (from->fib6_src.plen)
src_key = &rt->rt6i_src.addr;
#endif
rt6_ex = __rt6_find_exception_rcu(&bucket,
@ -1810,7 +1810,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
redo_rt6_select:
f6i = rt6_select(net, fn, oif, strict);
if (f6i->rt6i_nsiblings)
if (f6i->fib6_nsiblings)
f6i = rt6_multipath_select(net, f6i, fl6, oif, skb, strict);
if (f6i == net->ipv6.fib6_null_entry) {
fn = fib6_backtrack(fn, &fl6->saddr);
@ -1842,7 +1842,7 @@ redo_rt6_select:
trace_fib6_table_lookup(net, rt, table, fl6);
return rt;
} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
!(f6i->rt6i_flags & RTF_GATEWAY))) {
!(f6i->fib6_flags & RTF_GATEWAY))) {
/* Create a RTF_CACHE clone which will not be
* owned by the fib6 tree. It is for the special case where
* the daddr in the skb during the neighbor look-up is different
@ -2206,7 +2206,7 @@ static void ip6_link_failure(struct sk_buff *skb)
struct fib6_node *fn;
rcu_read_lock();
fn = rcu_dereference(rt->from->rt6i_node);
fn = rcu_dereference(rt->from->fib6_node);
if (fn && (rt->rt6i_flags & RTF_DEFAULT))
fn->fn_sernum = -1;
rcu_read_unlock();
@ -2372,9 +2372,9 @@ restart:
continue;
if (fib6_check_expired(rt))
continue;
if (rt->rt6i_flags & RTF_REJECT)
if (rt->fib6_flags & RTF_REJECT)
break;
if (!(rt->rt6i_flags & RTF_GATEWAY))
if (!(rt->fib6_flags & RTF_GATEWAY))
continue;
if (fl6->flowi6_oif != rt->fib6_nh.nh_dev->ifindex)
continue;
@ -2400,7 +2400,7 @@ restart:
if (!rt)
rt = net->ipv6.fib6_null_entry;
else if (rt->rt6i_flags & RTF_REJECT) {
else if (rt->fib6_flags & RTF_REJECT) {
ret = net->ipv6.ip6_null_entry;
goto out;
}
@ -2907,7 +2907,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
if (cfg->fc_protocol == RTPROT_UNSPEC)
cfg->fc_protocol = RTPROT_BOOT;
rt->rt6i_protocol = cfg->fc_protocol;
rt->fib6_protocol = cfg->fc_protocol;
addr_type = ipv6_addr_type(&cfg->fc_dst);
@ -2922,17 +2922,17 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
rt->fib6_nh.nh_lwtstate = lwtstate_get(lwtstate);
}
ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
rt->rt6i_dst.plen = cfg->fc_dst_len;
if (rt->rt6i_dst.plen == 128)
ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
rt->fib6_dst.plen = cfg->fc_dst_len;
if (rt->fib6_dst.plen == 128)
rt->dst_host = true;
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
rt->rt6i_src.plen = cfg->fc_src_len;
ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
rt->fib6_src.plen = cfg->fc_src_len;
#endif
rt->rt6i_metric = cfg->fc_metric;
rt->fib6_metric = cfg->fc_metric;
rt->fib6_nh.nh_weight = 1;
rt->fib6_type = cfg->fc_type;
@ -2958,7 +2958,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
goto out;
}
}
rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
rt->fib6_flags = RTF_REJECT|RTF_NONEXTHOP;
goto install_route;
}
@ -2992,21 +2992,21 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
err = -EINVAL;
goto out;
}
rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
rt->rt6i_prefsrc.plen = 128;
rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
rt->fib6_prefsrc.plen = 128;
} else
rt->rt6i_prefsrc.plen = 0;
rt->fib6_prefsrc.plen = 0;
rt->rt6i_flags = cfg->fc_flags;
rt->fib6_flags = cfg->fc_flags;
install_route:
if (!(rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
if (!(rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
!netif_carrier_ok(dev))
rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
rt->fib6_nh.nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK);
rt->fib6_nh.nh_dev = dev;
rt->rt6i_idev = idev;
rt->rt6i_table = table;
rt->fib6_idev = idev;
rt->fib6_table = table;
cfg->fc_nlinfo.nl_net = dev_net(dev);
@ -3048,7 +3048,7 @@ static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
goto out;
}
table = rt->rt6i_table;
table = rt->fib6_table;
spin_lock_bh(&table->tb6_lock);
err = fib6_del(rt, info);
spin_unlock_bh(&table->tb6_lock);
@ -3075,10 +3075,10 @@ static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
if (rt == net->ipv6.fib6_null_entry)
goto out_put;
table = rt->rt6i_table;
table = rt->fib6_table;
spin_lock_bh(&table->tb6_lock);
if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
struct fib6_info *sibling, *next_sibling;
/* prefer to send a single notification with all hops */
@ -3096,8 +3096,8 @@ static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
}
list_for_each_entry_safe(sibling, next_sibling,
&rt->rt6i_siblings,
rt6i_siblings) {
&rt->fib6_siblings,
fib6_siblings) {
err = fib6_del(sibling, info);
if (err)
goto out_unlock;
@ -3176,9 +3176,9 @@ static int ip6_route_del(struct fib6_config *cfg,
if (cfg->fc_flags & RTF_GATEWAY &&
!ipv6_addr_equal(&cfg->fc_gateway, &rt->fib6_nh.nh_gw))
continue;
if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
continue;
if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
continue;
fib6_info_hold(rt);
rcu_read_unlock();
@ -3336,7 +3336,7 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
for_each_fib6_node_rt_rcu(fn) {
if (rt->fib6_nh.nh_dev->ifindex != ifindex)
continue;
if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
if ((rt->fib6_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
continue;
if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
continue;
@ -3396,7 +3396,7 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
rcu_read_lock();
for_each_fib6_node_rt_rcu(&table->tb6_root) {
if (dev == rt->fib6_nh.nh_dev &&
((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
break;
}
@ -3445,8 +3445,8 @@ static void __rt6_purge_dflt_routers(struct net *net,
restart:
rcu_read_lock();
for_each_fib6_node_rt_rcu(&table->tb6_root) {
if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
(!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
(!rt->fib6_idev || rt->fib6_idev->cnf.accept_ra != 2)) {
fib6_info_hold(rt);
rcu_read_unlock();
ip6_del_rt(net, rt);
@ -3607,26 +3607,26 @@ struct fib6_info *addrconf_dst_alloc(struct net *net,
rt->dst_nocount = true;
in6_dev_hold(idev);
rt->rt6i_idev = idev;
rt->fib6_idev = idev;
rt->dst_host = true;
rt->rt6i_protocol = RTPROT_KERNEL;
rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
rt->fib6_protocol = RTPROT_KERNEL;
rt->fib6_flags = RTF_UP | RTF_NONEXTHOP;
if (anycast) {
rt->fib6_type = RTN_ANYCAST;
rt->rt6i_flags |= RTF_ANYCAST;
rt->fib6_flags |= RTF_ANYCAST;
} else {
rt->fib6_type = RTN_LOCAL;
rt->rt6i_flags |= RTF_LOCAL;
rt->fib6_flags |= RTF_LOCAL;
}
rt->fib6_nh.nh_gw = *addr;
dev_hold(dev);
rt->fib6_nh.nh_dev = dev;
rt->rt6i_dst.addr = *addr;
rt->rt6i_dst.plen = 128;
rt->fib6_dst.addr = *addr;
rt->fib6_dst.plen = 128;
tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
rt->rt6i_table = fib6_get_table(net, tb_id);
rt->fib6_table = fib6_get_table(net, tb_id);
return rt;
}
@ -3646,10 +3646,10 @@ static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
if (((void *)rt->fib6_nh.nh_dev == dev || !dev) &&
rt != net->ipv6.fib6_null_entry &&
ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
spin_lock_bh(&rt6_exception_lock);
/* remove prefsrc entry */
rt->rt6i_prefsrc.plen = 0;
rt->fib6_prefsrc.plen = 0;
/* need to update cache as well */
rt6_exceptions_remove_prefsrc(rt);
spin_unlock_bh(&rt6_exception_lock);
@ -3675,7 +3675,7 @@ static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
{
struct in6_addr *gateway = (struct in6_addr *)arg;
if (((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
ipv6_addr_equal(gateway, &rt->fib6_nh.nh_gw)) {
return -1;
}
@ -3707,16 +3707,16 @@ static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
struct fib6_info *iter;
struct fib6_node *fn;
fn = rcu_dereference_protected(rt->rt6i_node,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
fn = rcu_dereference_protected(rt->fib6_node,
lockdep_is_held(&rt->fib6_table->tb6_lock));
iter = rcu_dereference_protected(fn->leaf,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
lockdep_is_held(&rt->fib6_table->tb6_lock));
while (iter) {
if (iter->rt6i_metric == rt->rt6i_metric &&
if (iter->fib6_metric == rt->fib6_metric &&
rt6_qualify_for_ecmp(iter))
return iter;
iter = rcu_dereference_protected(iter->rt6_next,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
lockdep_is_held(&rt->fib6_table->tb6_lock));
}
return NULL;
@ -3726,7 +3726,7 @@ static bool rt6_is_dead(const struct fib6_info *rt)
{
if (rt->fib6_nh.nh_flags & RTNH_F_DEAD ||
(rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN &&
rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
rt->fib6_idev->cnf.ignore_routes_with_linkdown))
return true;
return false;
@ -3740,7 +3740,7 @@ static int rt6_multipath_total_weight(const struct fib6_info *rt)
if (!rt6_is_dead(rt))
total += rt->fib6_nh.nh_weight;
list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) {
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
if (!rt6_is_dead(iter))
total += iter->fib6_nh.nh_weight;
}
@ -3767,7 +3767,7 @@ static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
rt6_upper_bound_set(rt, &weight, total);
list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
rt6_upper_bound_set(iter, &weight, total);
}
@ -3780,7 +3780,7 @@ void rt6_multipath_rebalance(struct fib6_info *rt)
* then there is no need to rebalance upon the removal of every
* sibling route.
*/
if (!rt->rt6i_nsiblings || rt->should_flush)
if (!rt->fib6_nsiblings || rt->should_flush)
return;
/* During lookup routes are evaluated in order, so we need to
@ -3831,7 +3831,7 @@ static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
if (rt->fib6_nh.nh_dev == dev)
return true;
list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
if (iter->fib6_nh.nh_dev == dev)
return true;
@ -3843,7 +3843,7 @@ static void rt6_multipath_flush(struct fib6_info *rt)
struct fib6_info *iter;
rt->should_flush = 1;
list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
iter->should_flush = 1;
}
@ -3856,7 +3856,7 @@ static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
if (rt->fib6_nh.nh_dev == down_dev ||
rt->fib6_nh.nh_flags & RTNH_F_DEAD)
dead++;
list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
if (iter->fib6_nh.nh_dev == down_dev ||
iter->fib6_nh.nh_flags & RTNH_F_DEAD)
dead++;
@ -3872,7 +3872,7 @@ static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
if (rt->fib6_nh.nh_dev == dev)
rt->fib6_nh.nh_flags |= nh_flags;
list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
if (iter->fib6_nh.nh_dev == dev)
iter->fib6_nh.nh_flags |= nh_flags;
}
@ -3893,13 +3893,13 @@ static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
case NETDEV_DOWN:
if (rt->should_flush)
return -1;
if (!rt->rt6i_nsiblings)
if (!rt->fib6_nsiblings)
return rt->fib6_nh.nh_dev == dev ? -1 : 0;
if (rt6_multipath_uses_dev(rt, dev)) {
unsigned int count;
count = rt6_multipath_dead_count(rt, dev);
if (rt->rt6i_nsiblings + 1 == count) {
if (rt->fib6_nsiblings + 1 == count) {
rt6_multipath_flush(rt);
return -1;
}
@ -3911,7 +3911,7 @@ static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
return -2;
case NETDEV_CHANGE:
if (rt->fib6_nh.nh_dev != dev ||
rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
break;
rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
rt6_multipath_rebalance(rt);
@ -4188,10 +4188,10 @@ static void ip6_route_mpath_notify(struct fib6_info *rt,
* nexthop. Since sibling routes are always added at the end of
* the list, find the first sibling of the last route appended
*/
if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
rt = list_first_entry(&rt_last->rt6i_siblings,
if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
rt = list_first_entry(&rt_last->fib6_siblings,
struct fib6_info,
rt6i_siblings);
fib6_siblings);
}
if (rt)
@ -4410,13 +4410,13 @@ static size_t rt6_nlmsg_size(struct fib6_info *rt)
{
int nexthop_len = 0;
if (rt->rt6i_nsiblings) {
if (rt->fib6_nsiblings) {
nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
+ NLA_ALIGN(sizeof(struct rtnexthop))
+ nla_total_size(16) /* RTA_GATEWAY */
+ lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate);
nexthop_len *= rt->rt6i_nsiblings;
nexthop_len *= rt->fib6_nsiblings;
}
return NLMSG_ALIGN(sizeof(struct rtmsg))
@ -4444,11 +4444,11 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct fib6_info *rt,
if (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN) {
*flags |= RTNH_F_LINKDOWN;
if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
if (rt->fib6_idev->cnf.ignore_routes_with_linkdown)
*flags |= RTNH_F_DEAD;
}
if (rt->rt6i_flags & RTF_GATEWAY) {
if (rt->fib6_flags & RTF_GATEWAY) {
if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->fib6_nh.nh_gw) < 0)
goto nla_put_failure;
}
@ -4518,11 +4518,11 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
rtm = nlmsg_data(nlh);
rtm->rtm_family = AF_INET6;
rtm->rtm_dst_len = rt->rt6i_dst.plen;
rtm->rtm_src_len = rt->rt6i_src.plen;
rtm->rtm_dst_len = rt->fib6_dst.plen;
rtm->rtm_src_len = rt->fib6_src.plen;
rtm->rtm_tos = 0;
if (rt->rt6i_table)
table = rt->rt6i_table->tb6_id;
if (rt->fib6_table)
table = rt->fib6_table->tb6_id;
else
table = RT6_TABLE_UNSPEC;
rtm->rtm_table = table;
@ -4532,9 +4532,9 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
rtm->rtm_type = rt->fib6_type;
rtm->rtm_flags = 0;
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_protocol = rt->rt6i_protocol;
rtm->rtm_protocol = rt->fib6_protocol;
if (rt->rt6i_flags & RTF_CACHE)
if (rt->fib6_flags & RTF_CACHE)
rtm->rtm_flags |= RTM_F_CLONED;
if (dest) {
@ -4542,7 +4542,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
rtm->rtm_dst_len = 128;
} else if (rtm->rtm_dst_len)
if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr))
goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
if (src) {
@ -4550,12 +4550,12 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
rtm->rtm_src_len = 128;
} else if (rtm->rtm_src_len &&
nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr))
goto nla_put_failure;
#endif
if (iif) {
#ifdef CONFIG_IPV6_MROUTE
if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) {
int err = ip6mr_get_route(net, skb, rtm, portid);
if (err == 0)
@ -4573,9 +4573,9 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
}
if (rt->rt6i_prefsrc.plen) {
if (rt->fib6_prefsrc.plen) {
struct in6_addr saddr_buf;
saddr_buf = rt->rt6i_prefsrc.addr;
saddr_buf = rt->fib6_prefsrc.addr;
if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
goto nla_put_failure;
}
@ -4584,13 +4584,13 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
if (rtnetlink_put_metrics(skb, pmetrics) < 0)
goto nla_put_failure;
if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
goto nla_put_failure;
/* For multipath routes, walk the siblings list and add
* each as a nexthop within RTA_MULTIPATH.
*/
if (rt->rt6i_nsiblings) {
if (rt->fib6_nsiblings) {
struct fib6_info *sibling, *next_sibling;
struct nlattr *mp;
@ -4602,7 +4602,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
list_for_each_entry_safe(sibling, next_sibling,
&rt->rt6i_siblings, rt6i_siblings) {
&rt->fib6_siblings, fib6_siblings) {
if (rt6_add_nexthop(skb, sibling) < 0)
goto nla_put_failure;
}
@ -4613,7 +4613,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
}
if (rt->rt6i_flags & RTF_EXPIRES) {
if (rt->fib6_flags & RTF_EXPIRES) {
expires = dst ? dst->expires : rt->expires;
expires -= jiffies;
}
@ -4621,7 +4621,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
goto nla_put_failure;
if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags)))
goto nla_put_failure;
@ -4646,7 +4646,7 @@ int rt6_dump_route(struct fib6_info *rt, void *p_arg)
/* user wants prefix routes only */
if (rtm->rtm_flags & RTM_F_PREFIX &&
!(rt->rt6i_flags & RTF_PREFIX_RT)) {
!(rt->fib6_flags & RTF_PREFIX_RT)) {
/* success since this is not a prefix route */
return 1;
}
@ -4820,7 +4820,7 @@ static int ip6_route_dev_notify(struct notifier_block *this,
if (event == NETDEV_REGISTER) {
net->ipv6.fib6_null_entry->fib6_nh.nh_dev = dev;
net->ipv6.fib6_null_entry->rt6i_idev = in6_dev_get(dev);
net->ipv6.fib6_null_entry->fib6_idev = in6_dev_get(dev);
net->ipv6.ip6_null_entry->dst.dev = dev;
net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
@ -4834,7 +4834,7 @@ static int ip6_route_dev_notify(struct notifier_block *this,
/* NETDEV_UNREGISTER could be fired for multiple times by
* netdev_wait_allrefs(). Make sure we only call this once.
*/
in6_dev_put_clear(&net->ipv6.fib6_null_entry->rt6i_idev);
in6_dev_put_clear(&net->ipv6.fib6_null_entry->fib6_idev);
in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
@ -5157,7 +5157,7 @@ void __init ip6_route_init_special_entries(void)
* the loopback reference in rt6_info will not be taken, do it
* manually for init_net */
init_net.ipv6.fib6_null_entry->fib6_nh.nh_dev = init_net.loopback_dev;
init_net.ipv6.fib6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
init_net.ipv6.fib6_null_entry->fib6_idev = in6_dev_get(init_net.loopback_dev);
init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES