2019-05-27 14:55:01 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* INET An implementation of the TCP/IP protocol suite for the LINUX
|
|
|
|
* operating system. INET is implemented using the BSD Socket
|
|
|
|
* interface as the means of communication with the user level.
|
|
|
|
*
|
|
|
|
* IPv4 Forwarding Information Base: FIB frontend.
|
|
|
|
*
|
|
|
|
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
2016-12-25 03:46:01 +08:00
|
|
|
#include <linux/uaccess.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/bitops.h>
|
2006-01-12 04:17:47 +08:00
|
|
|
#include <linux/capability.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/socket.h>
|
|
|
|
#include <linux/sockios.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/in.h>
|
|
|
|
#include <linux/inet.h>
|
2005-12-27 12:43:12 +08:00
|
|
|
#include <linux/inetdevice.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/netdevice.h>
|
2006-08-05 14:04:54 +08:00
|
|
|
#include <linux/if_addr.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/if_arp.h>
|
|
|
|
#include <linux/skbuff.h>
|
2012-06-29 16:32:45 +08:00
|
|
|
#include <linux/cache.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/init.h>
|
2006-08-11 14:10:46 +08:00
|
|
|
#include <linux/list.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2022-02-04 21:58:16 +08:00
|
|
|
#include <net/inet_dscp.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <net/ip.h>
|
|
|
|
#include <net/protocol.h>
|
|
|
|
#include <net/route.h>
|
|
|
|
#include <net/tcp.h>
|
|
|
|
#include <net/sock.h>
|
|
|
|
#include <net/arp.h>
|
|
|
|
#include <net/ip_fib.h>
|
2019-06-04 11:19:49 +08:00
|
|
|
#include <net/nexthop.h>
|
2007-03-23 02:55:17 +08:00
|
|
|
#include <net/rtnetlink.h>
|
2011-04-07 12:51:51 +08:00
|
|
|
#include <net/xfrm.h>
|
2015-09-30 11:07:13 +08:00
|
|
|
#include <net/l3mdev.h>
|
2017-01-18 06:57:36 +08:00
|
|
|
#include <net/lwtunnel.h>
|
2015-08-28 23:42:09 +08:00
|
|
|
#include <trace/events/fib.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#ifndef CONFIG_IP_MULTIPLE_TABLES
|
|
|
|
|
2008-01-10 19:22:17 +08:00
|
|
|
/* Without CONFIG_IP_MULTIPLE_TABLES there are exactly two fixed tables:
 * MAIN and LOCAL.  Create both tries and hook them into the per-netns
 * table hash at their well-known bucket indices.
 *
 * Returns 0 on success or -ENOMEM if either trie allocation fails.
 */
static int __net_init fib4_rules_init(struct net *net)
{
	struct fib_table *tb_local, *tb_main;

	tb_main = fib_trie_table(RT_TABLE_MAIN, NULL);
	if (!tb_main)
		return -ENOMEM;

	/* LOCAL aliases MAIN so lookups can fall through to it */
	tb_local = fib_trie_table(RT_TABLE_LOCAL, tb_main);
	if (!tb_local)
		goto free_main;

	hlist_add_head_rcu(&tb_local->tb_hlist,
			   &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]);
	hlist_add_head_rcu(&tb_main->tb_hlist,
			   &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]);
	return 0;

free_main:
	fib_free_table(tb_main);
	return -ENOMEM;
}
|
2006-08-11 14:10:46 +08:00
|
|
|
#else
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-01-10 19:24:11 +08:00
|
|
|
/* Look up the FIB table with the given id, creating it if it does not
 * exist yet.  id == 0 is treated as RT_TABLE_MAIN.
 *
 * When no custom rules are installed, a freshly created LOCAL table is
 * built as an alias of MAIN (created recursively if needed) so that the
 * two can be kept merged.
 *
 * Returns the table, or NULL on allocation failure.
 */
struct fib_table *fib_new_table(struct net *net, u32 id)
{
	struct fib_table *tb, *alias = NULL;
	unsigned int bucket;

	if (!id)
		id = RT_TABLE_MAIN;

	tb = fib_get_table(net, id);
	if (tb)
		return tb;

	if (id == RT_TABLE_LOCAL && !net->ipv4.fib_has_custom_rules)
		alias = fib_new_table(net, RT_TABLE_MAIN);

	tb = fib_trie_table(id, alias);
	if (!tb)
		return NULL;

	/* cache pointers to the two hot tables for fast lookup */
	switch (id) {
	case RT_TABLE_MAIN:
		rcu_assign_pointer(net->ipv4.fib_main, tb);
		break;
	case RT_TABLE_DEFAULT:
		rcu_assign_pointer(net->ipv4.fib_default, tb);
		break;
	default:
		break;
	}

	bucket = id & (FIB_TABLE_HASHSZ - 1);
	hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[bucket]);
	return tb;
}
EXPORT_SYMBOL_GPL(fib_new_table);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2015-01-01 02:56:24 +08:00
|
|
|
/* caller must hold either rtnl or rcu read lock */
|
2008-01-10 19:24:11 +08:00
|
|
|
struct fib_table *fib_get_table(struct net *net, u32 id)
|
2006-08-11 14:10:46 +08:00
|
|
|
{
|
|
|
|
struct fib_table *tb;
|
2008-01-10 19:28:24 +08:00
|
|
|
struct hlist_head *head;
|
2006-08-11 14:10:46 +08:00
|
|
|
unsigned int h;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-08-11 14:10:46 +08:00
|
|
|
if (id == 0)
|
|
|
|
id = RT_TABLE_MAIN;
|
|
|
|
h = id & (FIB_TABLE_HASHSZ - 1);
|
2008-01-10 19:28:24 +08:00
|
|
|
|
|
|
|
head = &net->ipv4.fib_table_hash[h];
|
2019-07-17 06:12:24 +08:00
|
|
|
hlist_for_each_entry_rcu(tb, head, tb_hlist,
|
|
|
|
lockdep_rtnl_is_held()) {
|
2015-01-01 02:56:24 +08:00
|
|
|
if (tb->tb_id == id)
|
2006-08-11 14:10:46 +08:00
|
|
|
return tb;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif /* CONFIG_IP_MULTIPLE_TABLES */
|
|
|
|
|
2015-03-07 05:47:00 +08:00
|
|
|
static void fib_replace_table(struct net *net, struct fib_table *old,
|
|
|
|
struct fib_table *new)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_IP_MULTIPLE_TABLES
|
|
|
|
switch (new->tb_id) {
|
|
|
|
case RT_TABLE_MAIN:
|
|
|
|
rcu_assign_pointer(net->ipv4.fib_main, new);
|
|
|
|
break;
|
|
|
|
case RT_TABLE_DEFAULT:
|
|
|
|
rcu_assign_pointer(net->ipv4.fib_default, new);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
/* replace the old table in the hlist */
|
|
|
|
hlist_replace_rcu(&old->tb_hlist, &new->tb_hlist);
|
|
|
|
}
|
|
|
|
|
|
|
|
int fib_unmerge(struct net *net)
|
|
|
|
{
|
2016-11-15 18:46:06 +08:00
|
|
|
struct fib_table *old, *new, *main_table;
|
2015-03-07 05:47:00 +08:00
|
|
|
|
2015-03-13 05:46:23 +08:00
|
|
|
/* attempt to fetch local table if it has been allocated */
|
2015-03-07 05:47:00 +08:00
|
|
|
old = fib_get_table(net, RT_TABLE_LOCAL);
|
2015-03-13 05:46:23 +08:00
|
|
|
if (!old)
|
|
|
|
return 0;
|
2015-03-07 05:47:00 +08:00
|
|
|
|
2015-03-13 05:46:23 +08:00
|
|
|
new = fib_trie_unmerge(old);
|
2015-03-07 05:47:00 +08:00
|
|
|
if (!new)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2016-11-15 18:46:06 +08:00
|
|
|
/* table is already unmerged */
|
|
|
|
if (new == old)
|
|
|
|
return 0;
|
|
|
|
|
2015-03-07 05:47:00 +08:00
|
|
|
/* replace merged table with clean table */
|
2016-11-15 18:46:06 +08:00
|
|
|
fib_replace_table(net, old, new);
|
|
|
|
fib_free_table(old);
|
|
|
|
|
|
|
|
/* attempt to fetch main table if it has been allocated */
|
|
|
|
main_table = fib_get_table(net, RT_TABLE_MAIN);
|
|
|
|
if (!main_table)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* flush local entries from main table */
|
|
|
|
fib_table_flush_external(main_table);
|
2015-03-07 05:47:00 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-05-23 03:04:44 +08:00
|
|
|
void fib_flush(struct net *net)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
int flushed = 0;
|
2006-08-11 14:10:46 +08:00
|
|
|
unsigned int h;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-08-11 14:10:46 +08:00
|
|
|
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
|
2015-03-05 07:02:44 +08:00
|
|
|
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
|
|
|
|
struct hlist_node *tmp;
|
|
|
|
struct fib_table *tb;
|
|
|
|
|
|
|
|
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
|
2019-01-09 17:57:39 +08:00
|
|
|
flushed += fib_table_flush(net, tb, false);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (flushed)
|
2012-09-07 08:45:29 +08:00
|
|
|
rt_cache_flush(net);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2007-12-05 15:28:46 +08:00
|
|
|
/*
|
|
|
|
* Find address type as if only "dev" was present in the system. If
|
|
|
|
* on_dev is NULL then all interfaces are taken into consideration.
|
|
|
|
*/
|
2012-04-15 13:58:06 +08:00
|
|
|
static inline unsigned int __inet_dev_addr_type(struct net *net,
|
|
|
|
const struct net_device *dev,
|
2015-09-02 04:26:35 +08:00
|
|
|
__be32 addr, u32 tb_id)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2011-03-12 15:02:42 +08:00
|
|
|
struct flowi4 fl4 = { .daddr = addr };
|
2005-04-17 06:20:36 +08:00
|
|
|
struct fib_result res;
|
2012-04-15 13:58:06 +08:00
|
|
|
unsigned int ret = RTN_BROADCAST;
|
2015-08-14 04:59:04 +08:00
|
|
|
struct fib_table *table;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-01-21 19:18:08 +08:00
|
|
|
if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
|
2005-04-17 06:20:36 +08:00
|
|
|
return RTN_BROADCAST;
|
2007-12-17 05:45:43 +08:00
|
|
|
if (ipv4_is_multicast(addr))
|
2005-04-17 06:20:36 +08:00
|
|
|
return RTN_MULTICAST;
|
|
|
|
|
2015-01-01 02:56:24 +08:00
|
|
|
rcu_read_lock();
|
|
|
|
|
2015-08-14 04:59:04 +08:00
|
|
|
table = fib_get_table(net, tb_id);
|
|
|
|
if (table) {
|
2005-04-17 06:20:36 +08:00
|
|
|
ret = RTN_UNICAST;
|
2015-08-14 04:59:04 +08:00
|
|
|
if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) {
|
2019-06-04 11:19:50 +08:00
|
|
|
struct fib_nh_common *nhc = fib_info_nhc(res.fi, 0);
|
2019-06-04 11:19:49 +08:00
|
|
|
|
2019-06-04 11:19:50 +08:00
|
|
|
if (!dev || dev == nhc->nhc_dev)
|
2007-12-05 15:28:46 +08:00
|
|
|
ret = res.type;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
2015-01-01 02:56:24 +08:00
|
|
|
|
|
|
|
rcu_read_unlock();
|
2005-04-17 06:20:36 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2015-09-02 04:26:35 +08:00
|
|
|
/* Address type of @addr in a specific table, ignoring any device. */
unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id)
{
	return __inet_dev_addr_type(net, NULL, addr, tb_id);
}
EXPORT_SYMBOL(inet_addr_type_table);
|
|
|
|
|
2008-01-10 19:25:28 +08:00
|
|
|
/* Address type of @addr in the LOCAL table, ignoring any device. */
unsigned int inet_addr_type(struct net *net, __be32 addr)
{
	return __inet_dev_addr_type(net, NULL, addr, RT_TABLE_LOCAL);
}
EXPORT_SYMBOL(inet_addr_type);
|
2007-12-05 15:28:46 +08:00
|
|
|
|
2008-01-10 19:25:28 +08:00
|
|
|
unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
|
|
|
|
__be32 addr)
|
2007-12-05 15:28:46 +08:00
|
|
|
{
|
2015-09-30 11:07:14 +08:00
|
|
|
u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
|
2015-08-14 04:59:04 +08:00
|
|
|
|
|
|
|
return __inet_dev_addr_type(net, dev, addr, rt_table);
|
2007-12-05 15:28:46 +08:00
|
|
|
}
|
2010-07-10 05:22:10 +08:00
|
|
|
EXPORT_SYMBOL(inet_dev_addr_type);
|
2007-12-05 15:28:46 +08:00
|
|
|
|
2015-08-14 04:59:05 +08:00
|
|
|
/* inet_addr_type with dev == NULL but using the table from a dev
|
|
|
|
* if one is associated
|
|
|
|
*/
|
|
|
|
unsigned int inet_addr_type_dev_table(struct net *net,
|
|
|
|
const struct net_device *dev,
|
|
|
|
__be32 addr)
|
|
|
|
{
|
2015-09-30 11:07:14 +08:00
|
|
|
u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
|
2015-08-14 04:59:05 +08:00
|
|
|
|
|
|
|
return __inet_dev_addr_type(net, NULL, addr, rt_table);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(inet_addr_type_dev_table);
|
|
|
|
|
2012-06-28 18:59:11 +08:00
|
|
|
__be32 fib_compute_spec_dst(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct net_device *dev = skb->dev;
|
|
|
|
struct in_device *in_dev;
|
|
|
|
struct fib_result res;
|
2012-06-29 09:33:24 +08:00
|
|
|
struct rtable *rt;
|
2012-06-28 18:59:11 +08:00
|
|
|
struct net *net;
|
2012-06-29 09:33:24 +08:00
|
|
|
int scope;
|
2012-06-28 18:59:11 +08:00
|
|
|
|
2012-06-29 09:33:24 +08:00
|
|
|
rt = skb_rtable(skb);
|
2012-07-19 05:35:03 +08:00
|
|
|
if ((rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL)) ==
|
|
|
|
RTCF_LOCAL)
|
2012-06-28 18:59:11 +08:00
|
|
|
return ip_hdr(skb)->daddr;
|
|
|
|
|
|
|
|
in_dev = __in_dev_get_rcu(dev);
|
|
|
|
|
|
|
|
net = dev_net(dev);
|
2012-06-29 09:33:24 +08:00
|
|
|
|
|
|
|
scope = RT_SCOPE_UNIVERSE;
|
|
|
|
if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
|
2018-07-28 00:15:46 +08:00
|
|
|
bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev);
|
2016-03-23 02:56:57 +08:00
|
|
|
struct flowi4 fl4 = {
|
|
|
|
.flowi4_iif = LOOPBACK_IFINDEX,
|
2022-03-15 04:45:51 +08:00
|
|
|
.flowi4_l3mdev = l3mdev_master_ifindex_rcu(dev),
|
2016-03-23 02:56:57 +08:00
|
|
|
.daddr = ip_hdr(skb)->saddr,
|
2020-12-25 03:01:09 +08:00
|
|
|
.flowi4_tos = ip_hdr(skb)->tos & IPTOS_RT_MASK,
|
2016-03-23 02:56:57 +08:00
|
|
|
.flowi4_scope = scope,
|
2018-07-28 00:15:46 +08:00
|
|
|
.flowi4_mark = vmark ? skb->mark : 0,
|
2016-03-23 02:56:57 +08:00
|
|
|
};
|
2015-06-24 01:45:37 +08:00
|
|
|
if (!fib_lookup(net, &fl4, &res, 0))
|
2019-04-03 05:11:55 +08:00
|
|
|
return fib_result_prefsrc(net, &res);
|
2012-06-29 09:33:24 +08:00
|
|
|
} else {
|
|
|
|
scope = RT_SCOPE_LINK;
|
|
|
|
}
|
|
|
|
|
|
|
|
return inet_select_addr(dev, ip_hdr(skb)->saddr, scope);
|
2012-06-28 18:59:11 +08:00
|
|
|
}
|
|
|
|
|
2018-09-21 04:50:47 +08:00
|
|
|
bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev)
|
|
|
|
{
|
|
|
|
bool dev_match = false;
|
2018-09-22 01:58:07 +08:00
|
|
|
#ifdef CONFIG_IP_ROUTE_MULTIPATH
|
2020-05-27 02:56:18 +08:00
|
|
|
if (unlikely(fi->nh)) {
|
|
|
|
dev_match = nexthop_uses_dev(fi->nh, dev);
|
|
|
|
} else {
|
|
|
|
int ret;
|
2018-09-21 04:50:47 +08:00
|
|
|
|
2020-05-27 02:56:18 +08:00
|
|
|
for (ret = 0; ret < fib_info_num_path(fi); ret++) {
|
|
|
|
const struct fib_nh_common *nhc = fib_info_nhc(fi, ret);
|
2018-09-21 04:50:47 +08:00
|
|
|
|
2020-05-27 02:56:18 +08:00
|
|
|
if (nhc_l3mdev_matches_dev(nhc, dev)) {
|
|
|
|
dev_match = true;
|
|
|
|
break;
|
|
|
|
}
|
2018-09-21 04:50:47 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#else
|
2019-06-04 11:19:50 +08:00
|
|
|
if (fib_info_nhc(fi, 0)->nhc_dev == dev)
|
2018-09-21 04:50:47 +08:00
|
|
|
dev_match = true;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return dev_match;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(fib_info_nh_uses_dev);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Given (packet source, input interface) and optional (dst, oif, tos):
|
2010-10-05 04:00:18 +08:00
|
|
|
* - (main) check, that source is valid i.e. not broadcast or our local
|
|
|
|
* address.
|
|
|
|
* - figure out what "logical" interface this packet arrived
|
|
|
|
* and calculate "specific destination" address.
|
|
|
|
* - check, that packet arrived from expected physical interface.
|
2010-10-05 18:41:36 +08:00
|
|
|
* called with rcu_read_lock()
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2012-06-29 16:32:45 +08:00
|
|
|
static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
|
|
|
|
u8 tos, int oif, struct net_device *dev,
|
|
|
|
int rpf, struct in_device *idev, u32 *itag)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2018-05-17 04:36:40 +08:00
|
|
|
struct net *net = dev_net(dev);
|
|
|
|
struct flow_keys flkeys;
|
2014-08-17 15:19:54 +08:00
|
|
|
int ret, no_addr;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct fib_result res;
|
2012-06-29 09:54:02 +08:00
|
|
|
struct flowi4 fl4;
|
|
|
|
bool dev_match;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-03-12 15:02:42 +08:00
|
|
|
fl4.flowi4_oif = 0;
|
2022-03-15 04:45:51 +08:00
|
|
|
fl4.flowi4_l3mdev = l3mdev_master_ifindex_rcu(dev);
|
|
|
|
fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
|
2011-03-12 15:02:42 +08:00
|
|
|
fl4.daddr = src;
|
|
|
|
fl4.saddr = dst;
|
|
|
|
fl4.flowi4_tos = tos;
|
|
|
|
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
|
2015-07-21 16:43:59 +08:00
|
|
|
fl4.flowi4_tun_key.tun_id = 0;
|
2015-09-30 10:07:07 +08:00
|
|
|
fl4.flowi4_flags = 0;
|
2017-02-26 21:50:52 +08:00
|
|
|
fl4.flowi4_uid = sock_net_uid(net, NULL);
|
2020-09-14 02:43:39 +08:00
|
|
|
fl4.flowi4_multipath_hash = 0;
|
2011-03-10 12:57:50 +08:00
|
|
|
|
2012-06-29 09:54:02 +08:00
|
|
|
no_addr = idev->ifa_list == NULL;
|
2011-04-07 12:51:51 +08:00
|
|
|
|
2012-06-29 09:54:02 +08:00
|
|
|
fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
|
2018-05-17 04:36:40 +08:00
|
|
|
if (!fib4_rules_early_flow_dissect(net, skb, &fl4, &flkeys)) {
|
|
|
|
fl4.flowi4_proto = 0;
|
|
|
|
fl4.fl4_sport = 0;
|
|
|
|
fl4.fl4_dport = 0;
|
2021-06-22 12:24:50 +08:00
|
|
|
} else {
|
|
|
|
swap(fl4.fl4_sport, fl4.fl4_dport);
|
2018-05-17 04:36:40 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2015-06-24 01:45:37 +08:00
|
|
|
if (fib_lookup(net, &fl4, &res, 0))
|
2005-04-17 06:20:36 +08:00
|
|
|
goto last_resort;
|
2014-08-17 15:19:54 +08:00
|
|
|
if (res.type != RTN_UNICAST &&
|
|
|
|
(res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev)))
|
|
|
|
goto e_inval;
|
2005-04-17 06:20:36 +08:00
|
|
|
fib_combine_itag(itag, &res);
|
2010-09-07 13:36:19 +08:00
|
|
|
|
2018-09-21 04:50:47 +08:00
|
|
|
dev_match = fib_info_nh_uses_dev(res.fi, dev);
|
2019-07-18 05:41:58 +08:00
|
|
|
/* This is not common, loopback packets retain skb_dst so normally they
|
|
|
|
* would not even hit this slow path.
|
|
|
|
*/
|
|
|
|
dev_match = dev_match || (res.type == RTN_LOCAL &&
|
|
|
|
dev == net->loopback_dev);
|
2010-09-07 13:36:19 +08:00
|
|
|
if (dev_match) {
|
2022-10-20 18:09:50 +08:00
|
|
|
ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
|
2005-04-17 06:20:36 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
if (no_addr)
|
|
|
|
goto last_resort;
|
2009-02-20 16:25:36 +08:00
|
|
|
if (rpf == 1)
|
2010-06-02 20:05:27 +08:00
|
|
|
goto e_rpf;
|
2011-03-12 15:02:42 +08:00
|
|
|
fl4.flowi4_oif = dev->ifindex;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
ret = 0;
|
2015-06-24 01:45:37 +08:00
|
|
|
if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
|
2012-06-28 19:05:27 +08:00
|
|
|
if (res.type == RTN_UNICAST)
|
2022-10-20 18:09:50 +08:00
|
|
|
ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
last_resort:
|
|
|
|
if (rpf)
|
2010-06-02 20:05:27 +08:00
|
|
|
goto e_rpf;
|
2005-04-17 06:20:36 +08:00
|
|
|
*itag = 0;
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
e_inval:
|
|
|
|
return -EINVAL;
|
2010-06-02 20:05:27 +08:00
|
|
|
e_rpf:
|
|
|
|
return -EXDEV;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2012-06-29 16:32:45 +08:00
|
|
|
/* Ignore rp_filter for packets protected by IPsec. */
|
|
|
|
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
|
|
|
|
u8 tos, int oif, struct net_device *dev,
|
|
|
|
struct in_device *idev, u32 *itag)
|
|
|
|
{
|
|
|
|
int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
|
2017-09-21 00:26:53 +08:00
|
|
|
struct net *net = dev_net(dev);
|
2012-06-29 16:32:45 +08:00
|
|
|
|
2017-09-21 00:26:53 +08:00
|
|
|
if (!r && !fib_num_tclassid_users(net) &&
|
2012-10-08 19:41:15 +08:00
|
|
|
(dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) {
|
2017-09-21 00:26:53 +08:00
|
|
|
if (IN_DEV_ACCEPT_LOCAL(idev))
|
|
|
|
goto ok;
|
2017-10-31 21:32:38 +08:00
|
|
|
/* with custom local routes in place, checking local addresses
|
|
|
|
* only will be too optimistic, with custom rules, checking
|
|
|
|
* local addresses only can be too strict, e.g. due to vrf
|
2017-09-21 00:26:53 +08:00
|
|
|
*/
|
2017-10-31 21:32:38 +08:00
|
|
|
if (net->ipv4.fib_has_custom_local_routes ||
|
|
|
|
fib4_has_custom_rules(net))
|
2017-09-21 00:26:53 +08:00
|
|
|
goto full_check;
|
2022-02-14 11:27:21 +08:00
|
|
|
/* Within the same container, it is regarded as a martian source,
|
|
|
|
* and the same host but different containers are not.
|
|
|
|
*/
|
2017-09-21 00:26:53 +08:00
|
|
|
if (inet_lookup_ifaddr_rcu(net, src))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
ok:
|
2012-06-29 16:32:45 +08:00
|
|
|
*itag = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
2017-09-21 00:26:53 +08:00
|
|
|
|
|
|
|
full_check:
|
2012-06-29 16:32:45 +08:00
|
|
|
return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag);
|
|
|
|
}
|
|
|
|
|
2006-09-28 09:40:00 +08:00
|
|
|
static inline __be32 sk_extract_addr(struct sockaddr *addr)
|
2006-08-18 09:14:52 +08:00
|
|
|
{
|
|
|
|
return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Append a 32-bit route metric attribute of @type/@value to the buffer
 * at @mx, which already holds @len bytes.  Returns the new total length.
 * The caller guarantees the buffer has room for one more attribute.
 */
static int put_rtax(struct nlattr *mx, int len, int type, u32 value)
{
	struct nlattr *nla = (struct nlattr *)((char *)mx + len);

	nla->nla_type = type;
	nla->nla_len = nla_attr_size(4);
	*(u32 *)nla_data(nla) = value;

	return len + nla_total_size(4);
}
|
|
|
|
|
2008-01-10 19:29:23 +08:00
|
|
|
static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
|
2006-08-18 09:14:52 +08:00
|
|
|
struct fib_config *cfg)
|
|
|
|
{
|
2006-09-27 13:15:46 +08:00
|
|
|
__be32 addr;
|
2006-08-18 09:14:52 +08:00
|
|
|
int plen;
|
|
|
|
|
|
|
|
memset(cfg, 0, sizeof(*cfg));
|
2008-01-10 19:29:23 +08:00
|
|
|
cfg->fc_nlinfo.nl_net = net;
|
2006-08-18 09:14:52 +08:00
|
|
|
|
|
|
|
if (rt->rt_dst.sa_family != AF_INET)
|
|
|
|
return -EAFNOSUPPORT;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check mask for validity:
|
|
|
|
* a) it must be contiguous.
|
|
|
|
* b) destination must have all host bits clear.
|
|
|
|
* c) if application forgot to set correct family (AF_INET),
|
|
|
|
* reject request unless it is absolutely clear i.e.
|
|
|
|
* both family and mask are zero.
|
|
|
|
*/
|
|
|
|
plen = 32;
|
|
|
|
addr = sk_extract_addr(&rt->rt_dst);
|
|
|
|
if (!(rt->rt_flags & RTF_HOST)) {
|
2006-09-28 09:40:00 +08:00
|
|
|
__be32 mask = sk_extract_addr(&rt->rt_genmask);
|
2006-08-18 09:14:52 +08:00
|
|
|
|
|
|
|
if (rt->rt_genmask.sa_family != AF_INET) {
|
|
|
|
if (mask || rt->rt_genmask.sa_family)
|
|
|
|
return -EAFNOSUPPORT;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bad_mask(mask, addr))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
plen = inet_mask_len(mask);
|
|
|
|
}
|
|
|
|
|
|
|
|
cfg->fc_dst_len = plen;
|
|
|
|
cfg->fc_dst = addr;
|
|
|
|
|
|
|
|
if (cmd != SIOCDELRT) {
|
|
|
|
cfg->fc_nlflags = NLM_F_CREATE;
|
|
|
|
cfg->fc_protocol = RTPROT_BOOT;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rt->rt_metric)
|
|
|
|
cfg->fc_priority = rt->rt_metric - 1;
|
|
|
|
|
|
|
|
if (rt->rt_flags & RTF_REJECT) {
|
|
|
|
cfg->fc_scope = RT_SCOPE_HOST;
|
|
|
|
cfg->fc_type = RTN_UNREACHABLE;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
cfg->fc_scope = RT_SCOPE_NOWHERE;
|
|
|
|
cfg->fc_type = RTN_UNICAST;
|
|
|
|
|
|
|
|
if (rt->rt_dev) {
|
|
|
|
char *colon;
|
|
|
|
struct net_device *dev;
|
|
|
|
char devname[IFNAMSIZ];
|
|
|
|
|
|
|
|
if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
devname[IFNAMSIZ-1] = 0;
|
|
|
|
colon = strchr(devname, ':');
|
|
|
|
if (colon)
|
|
|
|
*colon = 0;
|
2008-01-10 19:29:23 +08:00
|
|
|
dev = __dev_get_by_name(net, devname);
|
2006-08-18 09:14:52 +08:00
|
|
|
if (!dev)
|
|
|
|
return -ENODEV;
|
|
|
|
cfg->fc_oif = dev->ifindex;
|
2016-09-05 06:20:20 +08:00
|
|
|
cfg->fc_table = l3mdev_fib_table(dev);
|
2006-08-18 09:14:52 +08:00
|
|
|
if (colon) {
|
2019-06-01 00:27:07 +08:00
|
|
|
const struct in_ifaddr *ifa;
|
|
|
|
struct in_device *in_dev;
|
|
|
|
|
|
|
|
in_dev = __in_dev_get_rtnl(dev);
|
2006-08-18 09:14:52 +08:00
|
|
|
if (!in_dev)
|
|
|
|
return -ENODEV;
|
2019-06-01 00:27:07 +08:00
|
|
|
|
2006-08-18 09:14:52 +08:00
|
|
|
*colon = ':';
|
2019-06-01 00:27:07 +08:00
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
in_dev_for_each_ifa_rcu(ifa, in_dev) {
|
2006-08-18 09:14:52 +08:00
|
|
|
if (strcmp(ifa->ifa_label, devname) == 0)
|
|
|
|
break;
|
2019-06-01 00:27:07 +08:00
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
2015-04-03 16:17:26 +08:00
|
|
|
if (!ifa)
|
2006-08-18 09:14:52 +08:00
|
|
|
return -ENODEV;
|
|
|
|
cfg->fc_prefsrc = ifa->ifa_local;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
addr = sk_extract_addr(&rt->rt_gateway);
|
|
|
|
if (rt->rt_gateway.sa_family == AF_INET && addr) {
|
2015-08-14 04:59:05 +08:00
|
|
|
unsigned int addr_type;
|
|
|
|
|
2019-04-06 07:30:28 +08:00
|
|
|
cfg->fc_gw4 = addr;
|
|
|
|
cfg->fc_gw_family = AF_INET;
|
2015-08-14 04:59:05 +08:00
|
|
|
addr_type = inet_addr_type_table(net, addr, cfg->fc_table);
|
2006-08-18 09:14:52 +08:00
|
|
|
if (rt->rt_flags & RTF_GATEWAY &&
|
2015-08-14 04:59:05 +08:00
|
|
|
addr_type == RTN_UNICAST)
|
2006-08-18 09:14:52 +08:00
|
|
|
cfg->fc_scope = RT_SCOPE_UNIVERSE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cmd == SIOCDELRT)
|
|
|
|
return 0;
|
|
|
|
|
2019-04-06 07:30:28 +08:00
|
|
|
if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw_family)
|
2006-08-18 09:14:52 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (cfg->fc_scope == RT_SCOPE_NOWHERE)
|
|
|
|
cfg->fc_scope = RT_SCOPE_LINK;
|
|
|
|
|
|
|
|
if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) {
|
|
|
|
struct nlattr *mx;
|
|
|
|
int len = 0;
|
|
|
|
|
treewide: kzalloc() -> kcalloc()
The kzalloc() function has a 2-factor argument form, kcalloc(). This
patch replaces cases of:
kzalloc(a * b, gfp)
with:
kcalloc(a * b, gfp)
as well as handling cases of:
kzalloc(a * b * c, gfp)
with:
kzalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kzalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kzalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kzalloc
+ kcalloc
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kzalloc(sizeof(THING) * C2, ...)
|
kzalloc(sizeof(TYPE) * C2, ...)
|
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(C1 * C2, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * E2
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-13 05:03:40 +08:00
|
|
|
mx = kcalloc(3, nla_total_size(4), GFP_KERNEL);
|
2015-04-03 16:17:26 +08:00
|
|
|
if (!mx)
|
2006-08-18 09:14:52 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
if (rt->rt_flags & RTF_MTU)
|
|
|
|
len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40);
|
|
|
|
|
|
|
|
if (rt->rt_flags & RTF_WINDOW)
|
|
|
|
len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window);
|
|
|
|
|
|
|
|
if (rt->rt_flags & RTF_IRTT)
|
|
|
|
len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3);
|
|
|
|
|
|
|
|
cfg->fc_mx = mx;
|
|
|
|
cfg->fc_mx_len = len;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2010-10-05 04:00:18 +08:00
|
|
|
* Handle IP routing ioctl calls.
|
|
|
|
* These are used to manipulate the routing tables
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2017-07-01 20:03:10 +08:00
|
|
|
int ip_rt_ioctl(struct net *net, unsigned int cmd, struct rtentry *rt)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-08-18 09:14:52 +08:00
|
|
|
struct fib_config cfg;
|
2005-04-17 06:20:36 +08:00
|
|
|
int err;
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case SIOCADDRT: /* Add a route */
|
|
|
|
case SIOCDELRT: /* Delete a route */
|
net: Allow userns root to control ipv4
Allow an unpriviled user who has created a user namespace, and then
created a network namespace to effectively use the new network
namespace, by reducing capable(CAP_NET_ADMIN) and
capable(CAP_NET_RAW) calls to be ns_capable(net->user_ns,
CAP_NET_ADMIN), or capable(net->user_ns, CAP_NET_RAW) calls.
Settings that merely control a single network device are allowed.
Either the network device is a logical network device where
restrictions make no difference or the network device is hardware NIC
that has been explicity moved from the initial network namespace.
In general policy and network stack state changes are allowed
while resource control is left unchanged.
Allow creating raw sockets.
Allow the SIOCSARP ioctl to control the arp cache.
Allow the SIOCSIFFLAG ioctl to allow setting network device flags.
Allow the SIOCSIFADDR ioctl to allow setting a netdevice ipv4 address.
Allow the SIOCSIFBRDADDR ioctl to allow setting a netdevice ipv4 broadcast address.
Allow the SIOCSIFDSTADDR ioctl to allow setting a netdevice ipv4 destination address.
Allow the SIOCSIFNETMASK ioctl to allow setting a netdevice ipv4 netmask.
Allow the SIOCADDRT and SIOCDELRT ioctls to allow adding and deleting ipv4 routes.
Allow the SIOCADDTUNNEL, SIOCCHGTUNNEL and SIOCDELTUNNEL ioctls for
adding, changing and deleting gre tunnels.
Allow the SIOCADDTUNNEL, SIOCCHGTUNNEL and SIOCDELTUNNEL ioctls for
adding, changing and deleting ipip tunnels.
Allow the SIOCADDTUNNEL, SIOCCHGTUNNEL and SIOCDELTUNNEL ioctls for
adding, changing and deleting ipsec virtual tunnel interfaces.
Allow setting the MRT_INIT, MRT_DONE, MRT_ADD_VIF, MRT_DEL_VIF, MRT_ADD_MFC,
MRT_DEL_MFC, MRT_ASSERT, MRT_PIM, MRT_TABLE socket options on multicast routing
sockets.
Allow setting and receiving IPOPT_CIPSO, IP_OPT_SEC, IP_OPT_SID and
arbitrary ip options.
Allow setting IP_SEC_POLICY/IP_XFRM_POLICY ipv4 socket option.
Allow setting the IP_TRANSPARENT ipv4 socket option.
Allow setting the TCP_REPAIR socket option.
Allow setting the TCP_CONGESTION socket option.
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-11-16 11:03:05 +08:00
|
|
|
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EPERM;
|
2006-08-18 09:14:52 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
rtnl_lock();
|
2017-07-01 20:03:10 +08:00
|
|
|
err = rtentry_to_fib_config(net, cmd, rt, &cfg);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (err == 0) {
|
2006-08-18 09:14:52 +08:00
|
|
|
struct fib_table *tb;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (cmd == SIOCDELRT) {
|
2008-01-10 19:29:53 +08:00
|
|
|
tb = fib_get_table(net, cfg.fc_table);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (tb)
|
2017-05-28 06:19:26 +08:00
|
|
|
err = fib_table_delete(net, tb, &cfg,
|
|
|
|
NULL);
|
2006-08-18 09:14:52 +08:00
|
|
|
else
|
|
|
|
err = -ESRCH;
|
2005-04-17 06:20:36 +08:00
|
|
|
} else {
|
2008-01-10 19:29:53 +08:00
|
|
|
tb = fib_new_table(net, cfg.fc_table);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (tb)
|
2017-05-22 00:12:02 +08:00
|
|
|
err = fib_table_insert(net, tb,
|
|
|
|
&cfg, NULL);
|
2006-08-18 09:14:52 +08:00
|
|
|
else
|
|
|
|
err = -ENOBUFS;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2006-08-18 09:14:52 +08:00
|
|
|
|
|
|
|
/* allocated by rtentry_to_fib_config() */
|
|
|
|
kfree(cfg.fc_mx);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
rtnl_unlock();
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2010-10-05 04:00:18 +08:00
|
|
|
const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
|
2019-05-23 03:07:43 +08:00
|
|
|
[RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
|
2006-08-18 09:14:52 +08:00
|
|
|
[RTA_DST] = { .type = NLA_U32 },
|
|
|
|
[RTA_SRC] = { .type = NLA_U32 },
|
|
|
|
[RTA_IIF] = { .type = NLA_U32 },
|
|
|
|
[RTA_OIF] = { .type = NLA_U32 },
|
|
|
|
[RTA_GATEWAY] = { .type = NLA_U32 },
|
|
|
|
[RTA_PRIORITY] = { .type = NLA_U32 },
|
|
|
|
[RTA_PREFSRC] = { .type = NLA_U32 },
|
|
|
|
[RTA_METRICS] = { .type = NLA_NESTED },
|
2006-08-27 11:13:18 +08:00
|
|
|
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
|
2006-08-18 09:14:52 +08:00
|
|
|
[RTA_FLOW] = { .type = NLA_U32 },
|
2015-07-21 16:43:47 +08:00
|
|
|
[RTA_ENCAP_TYPE] = { .type = NLA_U16 },
|
|
|
|
[RTA_ENCAP] = { .type = NLA_NESTED },
|
2016-11-04 01:23:42 +08:00
|
|
|
[RTA_UID] = { .type = NLA_U32 },
|
2017-02-27 20:59:39 +08:00
|
|
|
[RTA_MARK] = { .type = NLA_U32 },
|
2018-05-23 04:44:51 +08:00
|
|
|
[RTA_TABLE] = { .type = NLA_U32 },
|
2018-05-23 05:03:27 +08:00
|
|
|
[RTA_IP_PROTO] = { .type = NLA_U8 },
|
|
|
|
[RTA_SPORT] = { .type = NLA_U16 },
|
|
|
|
[RTA_DPORT] = { .type = NLA_U16 },
|
2019-06-09 05:53:32 +08:00
|
|
|
[RTA_NH_ID] = { .type = NLA_U32 },
|
2006-08-18 09:14:52 +08:00
|
|
|
};
|
|
|
|
|
2019-04-06 07:30:40 +08:00
|
|
|
int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
{
|
|
|
|
struct rtvia *via;
|
|
|
|
int alen;
|
|
|
|
|
|
|
|
if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr)) {
|
|
|
|
NL_SET_ERR_MSG(extack, "Invalid attribute length for RTA_VIA");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
via = nla_data(nla);
|
|
|
|
alen = nla_len(nla) - offsetof(struct rtvia, rtvia_addr);
|
|
|
|
|
|
|
|
switch (via->rtvia_family) {
|
|
|
|
case AF_INET:
|
|
|
|
if (alen != sizeof(__be32)) {
|
|
|
|
NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_VIA");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
cfg->fc_gw_family = AF_INET;
|
|
|
|
cfg->fc_gw4 = *((__be32 *)via->rtvia_addr);
|
|
|
|
break;
|
|
|
|
case AF_INET6:
|
2020-11-16 06:45:09 +08:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2019-04-06 07:30:40 +08:00
|
|
|
if (alen != sizeof(struct in6_addr)) {
|
|
|
|
NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_VIA");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
cfg->fc_gw_family = AF_INET6;
|
|
|
|
cfg->fc_gw6 = *((struct in6_addr *)via->rtvia_addr);
|
|
|
|
#else
|
|
|
|
NL_SET_ERR_MSG(extack, "IPv6 support not enabled in kernel");
|
|
|
|
return -EINVAL;
|
|
|
|
#endif
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
NL_SET_ERR_MSG(extack, "Unsupported address family in RTA_VIA");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-01-10 19:29:23 +08:00
|
|
|
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
|
2017-05-22 00:12:02 +08:00
|
|
|
struct nlmsghdr *nlh, struct fib_config *cfg,
|
|
|
|
struct netlink_ext_ack *extack)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2019-04-06 07:30:40 +08:00
|
|
|
bool has_gw = false, has_via = false;
|
2006-08-18 09:14:52 +08:00
|
|
|
struct nlattr *attr;
|
|
|
|
int err, remaining;
|
|
|
|
struct rtmsg *rtm;
|
|
|
|
|
netlink: make validation more configurable for future strictness
We currently have two levels of strict validation:
1) liberal (default)
- undefined (type >= max) & NLA_UNSPEC attributes accepted
- attribute length >= expected accepted
- garbage at end of message accepted
2) strict (opt-in)
- NLA_UNSPEC attributes accepted
- attribute length >= expected accepted
Split out parsing strictness into four different options:
* TRAILING - check that there's no trailing data after parsing
attributes (in message or nested)
* MAXTYPE - reject attrs > max known type
* UNSPEC - reject attributes with NLA_UNSPEC policy entries
* STRICT_ATTRS - strictly validate attribute size
The default for future things should be *everything*.
The current *_strict() is a combination of TRAILING and MAXTYPE,
and is renamed to _deprecated_strict().
The current regular parsing has none of this, and is renamed to
*_parse_deprecated().
Additionally it allows us to selectively set one of the new flags
even on old policies. Notably, the UNSPEC flag could be useful in
this case, since it can be arranged (by filling in the policy) to
not be an incompatible userspace ABI change, but would then going
forward prevent forgetting attribute entries. Similar can apply
to the POLICY flag.
We end up with the following renames:
* nla_parse -> nla_parse_deprecated
* nla_parse_strict -> nla_parse_deprecated_strict
* nlmsg_parse -> nlmsg_parse_deprecated
* nlmsg_parse_strict -> nlmsg_parse_deprecated_strict
* nla_parse_nested -> nla_parse_nested_deprecated
* nla_validate_nested -> nla_validate_nested_deprecated
Using spatch, of course:
@@
expression TB, MAX, HEAD, LEN, POL, EXT;
@@
-nla_parse(TB, MAX, HEAD, LEN, POL, EXT)
+nla_parse_deprecated(TB, MAX, HEAD, LEN, POL, EXT)
@@
expression NLH, HDRLEN, TB, MAX, POL, EXT;
@@
-nlmsg_parse(NLH, HDRLEN, TB, MAX, POL, EXT)
+nlmsg_parse_deprecated(NLH, HDRLEN, TB, MAX, POL, EXT)
@@
expression NLH, HDRLEN, TB, MAX, POL, EXT;
@@
-nlmsg_parse_strict(NLH, HDRLEN, TB, MAX, POL, EXT)
+nlmsg_parse_deprecated_strict(NLH, HDRLEN, TB, MAX, POL, EXT)
@@
expression TB, MAX, NLA, POL, EXT;
@@
-nla_parse_nested(TB, MAX, NLA, POL, EXT)
+nla_parse_nested_deprecated(TB, MAX, NLA, POL, EXT)
@@
expression START, MAX, POL, EXT;
@@
-nla_validate_nested(START, MAX, POL, EXT)
+nla_validate_nested_deprecated(START, MAX, POL, EXT)
@@
expression NLH, HDRLEN, MAX, POL, EXT;
@@
-nlmsg_validate(NLH, HDRLEN, MAX, POL, EXT)
+nlmsg_validate_deprecated(NLH, HDRLEN, MAX, POL, EXT)
For this patch, don't actually add the strict, non-renamed versions
yet so that it breaks compile if I get it wrong.
Also, while at it, make nla_validate and nla_parse go down to a
common __nla_validate_parse() function to avoid code duplication.
Ultimately, this allows us to have very strict validation for every
new caller of nla_parse()/nlmsg_parse() etc as re-introduced in the
next patch, while existing things will continue to work as is.
In effect then, this adds fully strict validation for any new command.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-04-26 20:07:28 +08:00
|
|
|
err = nlmsg_validate_deprecated(nlh, sizeof(*rtm), RTA_MAX,
|
|
|
|
rtm_ipv4_policy, extack);
|
2006-08-18 09:14:52 +08:00
|
|
|
if (err < 0)
|
|
|
|
goto errout;
|
|
|
|
|
|
|
|
memset(cfg, 0, sizeof(*cfg));
|
|
|
|
|
|
|
|
rtm = nlmsg_data(nlh);
|
2022-02-04 21:58:16 +08:00
|
|
|
|
|
|
|
if (!inet_validate_dscp(rtm->rtm_tos)) {
|
|
|
|
NL_SET_ERR_MSG(extack,
|
|
|
|
"Invalid dsfield (tos): ECN bits must be 0");
|
|
|
|
err = -EINVAL;
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
cfg->fc_dscp = inet_dsfield_to_dscp(rtm->rtm_tos);
|
|
|
|
|
2006-08-18 09:14:52 +08:00
|
|
|
cfg->fc_dst_len = rtm->rtm_dst_len;
|
|
|
|
cfg->fc_table = rtm->rtm_table;
|
|
|
|
cfg->fc_protocol = rtm->rtm_protocol;
|
|
|
|
cfg->fc_scope = rtm->rtm_scope;
|
|
|
|
cfg->fc_type = rtm->rtm_type;
|
|
|
|
cfg->fc_flags = rtm->rtm_flags;
|
|
|
|
cfg->fc_nlflags = nlh->nlmsg_flags;
|
|
|
|
|
2012-09-08 04:12:54 +08:00
|
|
|
cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
|
2006-08-18 09:14:52 +08:00
|
|
|
cfg->fc_nlinfo.nlh = nlh;
|
2008-01-10 19:29:23 +08:00
|
|
|
cfg->fc_nlinfo.nl_net = net;
|
2006-08-18 09:14:52 +08:00
|
|
|
|
2007-03-25 11:32:54 +08:00
|
|
|
if (cfg->fc_type > RTN_MAX) {
|
2017-05-22 00:12:03 +08:00
|
|
|
NL_SET_ERR_MSG(extack, "Invalid route type");
|
2007-03-25 11:32:54 +08:00
|
|
|
err = -EINVAL;
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
2006-08-18 09:14:52 +08:00
|
|
|
nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) {
|
2007-09-12 20:44:36 +08:00
|
|
|
switch (nla_type(attr)) {
|
2006-08-18 09:14:52 +08:00
|
|
|
case RTA_DST:
|
2006-09-27 13:15:25 +08:00
|
|
|
cfg->fc_dst = nla_get_be32(attr);
|
2006-08-18 09:14:52 +08:00
|
|
|
break;
|
|
|
|
case RTA_OIF:
|
|
|
|
cfg->fc_oif = nla_get_u32(attr);
|
|
|
|
break;
|
|
|
|
case RTA_GATEWAY:
|
2019-04-06 07:30:40 +08:00
|
|
|
has_gw = true;
|
2019-04-06 07:30:28 +08:00
|
|
|
cfg->fc_gw4 = nla_get_be32(attr);
|
2019-04-11 01:05:51 +08:00
|
|
|
if (cfg->fc_gw4)
|
|
|
|
cfg->fc_gw_family = AF_INET;
|
2006-08-18 09:14:52 +08:00
|
|
|
break;
|
2019-02-27 01:00:02 +08:00
|
|
|
case RTA_VIA:
|
2019-04-06 07:30:40 +08:00
|
|
|
has_via = true;
|
|
|
|
err = fib_gw_from_via(cfg, attr, extack);
|
|
|
|
if (err)
|
|
|
|
goto errout;
|
|
|
|
break;
|
2006-08-18 09:14:52 +08:00
|
|
|
case RTA_PRIORITY:
|
|
|
|
cfg->fc_priority = nla_get_u32(attr);
|
|
|
|
break;
|
|
|
|
case RTA_PREFSRC:
|
2006-09-27 13:15:25 +08:00
|
|
|
cfg->fc_prefsrc = nla_get_be32(attr);
|
2006-08-18 09:14:52 +08:00
|
|
|
break;
|
|
|
|
case RTA_METRICS:
|
|
|
|
cfg->fc_mx = nla_data(attr);
|
|
|
|
cfg->fc_mx_len = nla_len(attr);
|
|
|
|
break;
|
|
|
|
case RTA_MULTIPATH:
|
2017-01-18 06:57:36 +08:00
|
|
|
err = lwtunnel_valid_encap_type_attr(nla_data(attr),
|
2017-05-28 06:19:27 +08:00
|
|
|
nla_len(attr),
|
|
|
|
extack);
|
2017-01-18 06:57:36 +08:00
|
|
|
if (err < 0)
|
|
|
|
goto errout;
|
2006-08-18 09:14:52 +08:00
|
|
|
cfg->fc_mp = nla_data(attr);
|
|
|
|
cfg->fc_mp_len = nla_len(attr);
|
|
|
|
break;
|
|
|
|
case RTA_FLOW:
|
|
|
|
cfg->fc_flow = nla_get_u32(attr);
|
|
|
|
break;
|
|
|
|
case RTA_TABLE:
|
|
|
|
cfg->fc_table = nla_get_u32(attr);
|
|
|
|
break;
|
2015-07-21 16:43:47 +08:00
|
|
|
case RTA_ENCAP:
|
|
|
|
cfg->fc_encap = attr;
|
|
|
|
break;
|
|
|
|
case RTA_ENCAP_TYPE:
|
|
|
|
cfg->fc_encap_type = nla_get_u16(attr);
|
2017-05-28 06:19:27 +08:00
|
|
|
err = lwtunnel_valid_encap_type(cfg->fc_encap_type,
|
|
|
|
extack);
|
2017-01-18 06:57:36 +08:00
|
|
|
if (err < 0)
|
|
|
|
goto errout;
|
2015-07-21 16:43:47 +08:00
|
|
|
break;
|
2019-06-09 05:53:32 +08:00
|
|
|
case RTA_NH_ID:
|
|
|
|
cfg->fc_nh_id = nla_get_u32(attr);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cfg->fc_nh_id) {
|
|
|
|
if (cfg->fc_oif || cfg->fc_gw_family ||
|
|
|
|
cfg->fc_encap || cfg->fc_mp) {
|
|
|
|
NL_SET_ERR_MSG(extack,
|
|
|
|
"Nexthop specification and nexthop id are mutually exclusive");
|
|
|
|
return -EINVAL;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
2006-08-18 09:14:52 +08:00
|
|
|
|
2019-04-06 07:30:40 +08:00
|
|
|
if (has_gw && has_via) {
|
|
|
|
NL_SET_ERR_MSG(extack,
|
|
|
|
"Nexthop configuration can not contain both GATEWAY and VIA");
|
2020-12-04 16:48:14 +08:00
|
|
|
return -EINVAL;
|
2019-04-06 07:30:40 +08:00
|
|
|
}
|
|
|
|
|
2022-12-04 15:50:45 +08:00
|
|
|
if (!cfg->fc_table)
|
|
|
|
cfg->fc_table = RT_TABLE_MAIN;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
2006-08-18 09:14:52 +08:00
|
|
|
errout:
|
|
|
|
return err;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2017-04-17 00:48:24 +08:00
|
|
|
static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|
|
|
struct netlink_ext_ack *extack)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2008-03-26 01:26:21 +08:00
|
|
|
struct net *net = sock_net(skb->sk);
|
2006-08-18 09:14:52 +08:00
|
|
|
struct fib_config cfg;
|
|
|
|
struct fib_table *tb;
|
|
|
|
int err;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-05-22 00:12:02 +08:00
|
|
|
err = rtm_to_fib_config(net, skb, nlh, &cfg, extack);
|
2006-08-18 09:14:52 +08:00
|
|
|
if (err < 0)
|
|
|
|
goto errout;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-06-09 05:53:32 +08:00
|
|
|
if (cfg.fc_nh_id && !nexthop_find_by_id(net, cfg.fc_nh_id)) {
|
|
|
|
NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
|
|
|
|
err = -EINVAL;
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
2008-01-10 19:24:11 +08:00
|
|
|
tb = fib_get_table(net, cfg.fc_table);
|
2015-04-03 16:17:26 +08:00
|
|
|
if (!tb) {
|
2017-05-22 00:12:03 +08:00
|
|
|
NL_SET_ERR_MSG(extack, "FIB table does not exist");
|
2006-08-18 09:14:52 +08:00
|
|
|
err = -ESRCH;
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
2017-05-28 06:19:26 +08:00
|
|
|
err = fib_table_delete(net, tb, &cfg, extack);
|
2006-08-18 09:14:52 +08:00
|
|
|
errout:
|
|
|
|
return err;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2017-04-17 00:48:24 +08:00
|
|
|
static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|
|
|
struct netlink_ext_ack *extack)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2008-03-26 01:26:21 +08:00
|
|
|
struct net *net = sock_net(skb->sk);
|
2006-08-18 09:14:52 +08:00
|
|
|
struct fib_config cfg;
|
|
|
|
struct fib_table *tb;
|
|
|
|
int err;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-05-22 00:12:02 +08:00
|
|
|
err = rtm_to_fib_config(net, skb, nlh, &cfg, extack);
|
2006-08-18 09:14:52 +08:00
|
|
|
if (err < 0)
|
|
|
|
goto errout;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-01-10 19:30:24 +08:00
|
|
|
tb = fib_new_table(net, cfg.fc_table);
|
2015-04-03 16:17:26 +08:00
|
|
|
if (!tb) {
|
2006-08-18 09:14:52 +08:00
|
|
|
err = -ENOBUFS;
|
|
|
|
goto errout;
|
|
|
|
}
|
|
|
|
|
2017-05-22 00:12:02 +08:00
|
|
|
err = fib_table_insert(net, tb, &cfg, extack);
|
2017-09-21 00:26:53 +08:00
|
|
|
if (!err && cfg.fc_type == RTN_LOCAL)
|
|
|
|
net->ipv4.fib_has_custom_local_routes = true;
|
2006-08-18 09:14:52 +08:00
|
|
|
errout:
|
|
|
|
return err;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2018-10-16 09:56:42 +08:00
|
|
|
int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
|
|
|
|
struct fib_dump_filter *filter,
|
2018-10-16 09:56:48 +08:00
|
|
|
struct netlink_callback *cb)
|
2018-10-08 11:16:35 +08:00
|
|
|
{
|
2018-10-16 09:56:48 +08:00
|
|
|
struct netlink_ext_ack *extack = cb->extack;
|
|
|
|
struct nlattr *tb[RTA_MAX + 1];
|
2018-10-08 11:16:35 +08:00
|
|
|
struct rtmsg *rtm;
|
2018-10-16 09:56:48 +08:00
|
|
|
int err, i;
|
|
|
|
|
|
|
|
ASSERT_RTNL();
|
2018-10-08 11:16:35 +08:00
|
|
|
|
|
|
|
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
|
|
|
|
NL_SET_ERR_MSG(extack, "Invalid header for FIB dump request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
rtm = nlmsg_data(nlh);
|
|
|
|
if (rtm->rtm_dst_len || rtm->rtm_src_len || rtm->rtm_tos ||
|
2018-10-16 09:56:48 +08:00
|
|
|
rtm->rtm_scope) {
|
2018-10-08 11:16:35 +08:00
|
|
|
NL_SET_ERR_MSG(extack, "Invalid values in header for FIB dump request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
fib_frontend, ip6_fib: Select routes or exceptions dump from RTM_F_CLONED
The following patches add back the ability to dump IPv4 and IPv6 exception
routes, and we need to allow selection of regular routes or exceptions.
Use RTM_F_CLONED as filter to decide whether to dump routes or exceptions:
iproute2 passes it in dump requests (except for IPv6 cache flush requests,
this will be fixed in iproute2) and this used to work as long as
exceptions were stored directly in the FIB, for both IPv4 and IPv6.
Caveat: if strict checking is not requested (that is, if the dump request
doesn't go through ip_valid_fib_dump_req()), we can't filter on protocol,
tables or route types.
In this case, filtering on RTM_F_CLONED would be inconsistent: we would
fix 'ip route list cache' by returning exception routes and at the same
time introduce another bug in case another selector is present, e.g. on
'ip route list cache table main' we would return all exception routes,
without filtering on tables.
Keep this consistent by applying no filters at all, and dumping both
routes and exceptions, if strict checking is not requested. iproute2
currently filters results anyway, and no unwanted results will be
presented to the user. The kernel will just dump more data than needed.
v7: No changes
v6: Rebase onto net-next, no changes
v5: New patch: add dump_routes and dump_exceptions flags in filter and
simply clear the unwanted one if strict checking is enabled, don't
ignore NLM_F_MATCH and don't set filter_set if NLM_F_MATCH is set.
Skip filtering altogether if no strict checking is requested:
selecting routes or exceptions only would be inconsistent with the
fact we can't filter on tables.
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Reviewed-by: David Ahern <dsahern@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-06-21 23:45:20 +08:00
|
|
|
|
2018-10-08 11:16:35 +08:00
|
|
|
if (rtm->rtm_flags & ~(RTM_F_CLONED | RTM_F_PREFIX)) {
|
|
|
|
NL_SET_ERR_MSG(extack, "Invalid flags for FIB dump request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
fib_frontend, ip6_fib: Select routes or exceptions dump from RTM_F_CLONED
The following patches add back the ability to dump IPv4 and IPv6 exception
routes, and we need to allow selection of regular routes or exceptions.
Use RTM_F_CLONED as filter to decide whether to dump routes or exceptions:
iproute2 passes it in dump requests (except for IPv6 cache flush requests,
this will be fixed in iproute2) and this used to work as long as
exceptions were stored directly in the FIB, for both IPv4 and IPv6.
Caveat: if strict checking is not requested (that is, if the dump request
doesn't go through ip_valid_fib_dump_req()), we can't filter on protocol,
tables or route types.
In this case, filtering on RTM_F_CLONED would be inconsistent: we would
fix 'ip route list cache' by returning exception routes and at the same
time introduce another bug in case another selector is present, e.g. on
'ip route list cache table main' we would return all exception routes,
without filtering on tables.
Keep this consistent by applying no filters at all, and dumping both
routes and exceptions, if strict checking is not requested. iproute2
currently filters results anyway, and no unwanted results will be
presented to the user. The kernel will just dump more data than needed.
v7: No changes
v6: Rebase onto net-next, no changes
v5: New patch: add dump_routes and dump_exceptions flags in filter and
simply clear the unwanted one if strict checking is enabled, don't
ignore NLM_F_MATCH and don't set filter_set if NLM_F_MATCH is set.
Skip filtering altogether if no strict checking is requested:
selecting routes or exceptions only would be inconsistent with the
fact we can't filter on tables.
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Reviewed-by: David Ahern <dsahern@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-06-21 23:45:20 +08:00
|
|
|
if (rtm->rtm_flags & RTM_F_CLONED)
|
|
|
|
filter->dump_routes = false;
|
|
|
|
else
|
|
|
|
filter->dump_exceptions = false;
|
2018-10-08 11:16:35 +08:00
|
|
|
|
2018-10-16 09:56:48 +08:00
|
|
|
filter->flags = rtm->rtm_flags;
|
|
|
|
filter->protocol = rtm->rtm_protocol;
|
|
|
|
filter->rt_type = rtm->rtm_type;
|
|
|
|
filter->table_id = rtm->rtm_table;
|
|
|
|
|
netlink: make validation more configurable for future strictness
We currently have two levels of strict validation:
1) liberal (default)
- undefined (type >= max) & NLA_UNSPEC attributes accepted
- attribute length >= expected accepted
- garbage at end of message accepted
2) strict (opt-in)
- NLA_UNSPEC attributes accepted
- attribute length >= expected accepted
Split out parsing strictness into four different options:
* TRAILING - check that there's no trailing data after parsing
attributes (in message or nested)
* MAXTYPE - reject attrs > max known type
* UNSPEC - reject attributes with NLA_UNSPEC policy entries
* STRICT_ATTRS - strictly validate attribute size
The default for future things should be *everything*.
The current *_strict() is a combination of TRAILING and MAXTYPE,
and is renamed to _deprecated_strict().
The current regular parsing has none of this, and is renamed to
*_parse_deprecated().
Additionally it allows us to selectively set one of the new flags
even on old policies. Notably, the UNSPEC flag could be useful in
this case, since it can be arranged (by filling in the policy) to
not be an incompatible userspace ABI change, but would then going
forward prevent forgetting attribute entries. Similar can apply
to the POLICY flag.
We end up with the following renames:
* nla_parse -> nla_parse_deprecated
* nla_parse_strict -> nla_parse_deprecated_strict
* nlmsg_parse -> nlmsg_parse_deprecated
* nlmsg_parse_strict -> nlmsg_parse_deprecated_strict
* nla_parse_nested -> nla_parse_nested_deprecated
* nla_validate_nested -> nla_validate_nested_deprecated
Using spatch, of course:
@@
expression TB, MAX, HEAD, LEN, POL, EXT;
@@
-nla_parse(TB, MAX, HEAD, LEN, POL, EXT)
+nla_parse_deprecated(TB, MAX, HEAD, LEN, POL, EXT)
@@
expression NLH, HDRLEN, TB, MAX, POL, EXT;
@@
-nlmsg_parse(NLH, HDRLEN, TB, MAX, POL, EXT)
+nlmsg_parse_deprecated(NLH, HDRLEN, TB, MAX, POL, EXT)
@@
expression NLH, HDRLEN, TB, MAX, POL, EXT;
@@
-nlmsg_parse_strict(NLH, HDRLEN, TB, MAX, POL, EXT)
+nlmsg_parse_deprecated_strict(NLH, HDRLEN, TB, MAX, POL, EXT)
@@
expression TB, MAX, NLA, POL, EXT;
@@
-nla_parse_nested(TB, MAX, NLA, POL, EXT)
+nla_parse_nested_deprecated(TB, MAX, NLA, POL, EXT)
@@
expression START, MAX, POL, EXT;
@@
-nla_validate_nested(START, MAX, POL, EXT)
+nla_validate_nested_deprecated(START, MAX, POL, EXT)
@@
expression NLH, HDRLEN, MAX, POL, EXT;
@@
-nlmsg_validate(NLH, HDRLEN, MAX, POL, EXT)
+nlmsg_validate_deprecated(NLH, HDRLEN, MAX, POL, EXT)
For this patch, don't actually add the strict, non-renamed versions
yet so that it breaks compile if I get it wrong.
Also, while at it, make nla_validate and nla_parse go down to a
common __nla_validate_parse() function to avoid code duplication.
Ultimately, this allows us to have very strict validation for every
new caller of nla_parse()/nlmsg_parse() etc as re-introduced in the
next patch, while existing things will continue to work as is.
In effect then, this adds fully strict validation for any new command.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-04-26 20:07:28 +08:00
|
|
|
err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
|
|
|
|
rtm_ipv4_policy, extack);
|
2018-10-16 09:56:48 +08:00
|
|
|
if (err < 0)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
for (i = 0; i <= RTA_MAX; ++i) {
|
|
|
|
int ifindex;
|
|
|
|
|
|
|
|
if (!tb[i])
|
|
|
|
continue;
|
|
|
|
|
|
|
|
switch (i) {
|
|
|
|
case RTA_TABLE:
|
|
|
|
filter->table_id = nla_get_u32(tb[i]);
|
|
|
|
break;
|
|
|
|
case RTA_OIF:
|
|
|
|
ifindex = nla_get_u32(tb[i]);
|
|
|
|
filter->dev = __dev_get_by_index(net, ifindex);
|
|
|
|
if (!filter->dev)
|
|
|
|
return -ENODEV;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
NL_SET_ERR_MSG(extack, "Unsupported attribute in dump request");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (filter->flags || filter->protocol || filter->rt_type ||
|
|
|
|
filter->table_id || filter->dev) {
|
|
|
|
filter->filter_set = 1;
|
|
|
|
cb->answer_flags = NLM_F_DUMP_FILTERED;
|
2018-10-08 11:16:35 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(ip_valid_fib_dump_req);
|
|
|
|
|
2007-03-23 02:55:17 +08:00
|
|
|
/* RTM_GETROUTE dump handler: walk every FIB table (or one specific table
 * when a table filter was supplied) and emit its routes into @skb.
 * Resumable: cb->args[0]/args[1] hold the hash-bucket and table cursor
 * between invocations; cb->args[2..] are per-table state cleared on each
 * new table once something has already been dumped.
 */
static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* By default dump both regular routes and exceptions; strict
	 * checking (ip_valid_fib_dump_req) may narrow this below.
	 */
	struct fib_dump_filter filter = { .dump_routes = true,
					  .dump_exceptions = true };
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	unsigned int h, s_h;
	unsigned int e = 0, s_e;
	struct fib_table *tb;
	struct hlist_head *head;
	int dumped = 0, err;

	if (cb->strict_check) {
		err = ip_valid_fib_dump_req(net, nlh, &filter, cb);
		if (err < 0)
			return err;
	} else if (nlmsg_len(nlh) >= sizeof(struct rtmsg)) {
		/* Legacy (non-strict) request: honour only the flag bits
		 * carried in the rtmsg header.
		 */
		struct rtmsg *rtm = nlmsg_data(nlh);

		filter.flags = rtm->rtm_flags & (RTM_F_PREFIX | RTM_F_CLONED);
	}

	/* ipv4 does not use prefix flag */
	if (filter.flags & RTM_F_PREFIX)
		return skb->len;

	if (filter.table_id) {
		/* Single-table dump requested. */
		tb = fib_get_table(net, filter.table_id);
		if (!tb) {
			/* A PF_UNSPEC (dump-all-families) request may name a
			 * table that only exists for another family; that is
			 * not an error for us.
			 */
			if (rtnl_msg_family(cb->nlh) != PF_INET)
				return skb->len;

			NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist");
			return -ENOENT;
		}

		rcu_read_lock();
		err = fib_table_dump(tb, skb, cb, &filter);
		rcu_read_unlock();
		/* Report progress if anything was emitted, else the error. */
		return skb->len ? : err;
	}

	/* Full dump: resume from the saved bucket/table cursor. */
	s_h = cb->args[0];
	s_e = cb->args[1];

	rcu_read_lock();

	for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
		e = 0;
		head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
			if (e < s_e)
				goto next;
			if (dumped)
				/* Reset per-table dump state (args[2..])
				 * before starting a new table.
				 */
				memset(&cb->args[2], 0, sizeof(cb->args) -
				       2 * sizeof(cb->args[0]));
			err = fib_table_dump(tb, skb, cb, &filter);
			if (err < 0) {
				/* Partial output: stop and let userspace
				 * re-invoke; otherwise propagate the error.
				 */
				if (likely(skb->len))
					goto out;

				goto out_err;
			}
			dumped = 1;
next:
			e++;
		}
	}
out:
	err = skb->len;
out_err:
	rcu_read_unlock();

	/* Save the cursor for the next callback invocation. */
	cb->args[1] = e;
	cb->args[0] = h;

	return err;
}
|
|
|
|
|
|
|
|
/* Prepare and feed intra-kernel routing request.
|
2010-10-05 04:00:18 +08:00
|
|
|
* Really, it should be netlink message, but :-( netlink
|
|
|
|
* can be not configured, so that we feed it directly
|
|
|
|
* to fib engine. It is legal, because all events occur
|
|
|
|
* only when netlink is already locked.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2018-05-27 23:09:57 +08:00
|
|
|
/* Insert or delete (per @cmd: RTM_NEWROUTE/RTM_DELROUTE) a kernel-originated
 * route of @type for @dst/@dst_len derived from interface address @ifa,
 * with metric @rt_priority. Used by the address add/delete paths below.
 */
static void fib_magic(int cmd, int type, __be32 dst, int dst_len,
		      struct in_ifaddr *ifa, u32 rt_priority)
{
	struct net *net = dev_net(ifa->ifa_dev->dev);
	/* An enslaving L3 master device (VRF) dictates the table. */
	u32 tb_id = l3mdev_fib_table(ifa->ifa_dev->dev);
	struct fib_table *tb;
	struct fib_config cfg = {
		.fc_protocol = RTPROT_KERNEL,
		.fc_type = type,
		.fc_dst = dst,
		.fc_dst_len = dst_len,
		.fc_priority = rt_priority,
		.fc_prefsrc = ifa->ifa_local,
		.fc_oif = ifa->ifa_dev->dev->ifindex,
		.fc_nlflags = NLM_F_CREATE | NLM_F_APPEND,
		.fc_nlinfo = {
			.nl_net = net,
		},
	};

	/* No l3mdev table: unicast routes go to main, the rest to local. */
	if (!tb_id)
		tb_id = (type == RTN_UNICAST) ? RT_TABLE_MAIN : RT_TABLE_LOCAL;

	tb = fib_new_table(net, tb_id);
	if (!tb)
		return;

	cfg.fc_table = tb->tb_id;
	/* Local routes are host scope, everything else link scope. */
	cfg.fc_scope = (type == RTN_LOCAL) ? RT_SCOPE_HOST : RT_SCOPE_LINK;

	if (cmd == RTM_NEWROUTE)
		fib_table_insert(net, tb, &cfg, NULL);
	else
		fib_table_delete(net, tb, &cfg, NULL);
}
|
|
|
|
|
2005-11-23 06:47:37 +08:00
|
|
|
/**
 * fib_add_ifaddr - install the kernel routes implied by a new interface address
 * @ifa: the address being added
 *
 * Adds the /32 local route for the address itself and, when the device is up,
 * the broadcast route(s) and the connected-subnet prefix route. For secondary
 * addresses the routes are attributed to the matching primary (prefsrc).
 */
void fib_add_ifaddr(struct in_ifaddr *ifa)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct net_device *dev = in_dev->dev;
	struct in_ifaddr *prim = ifa;
	__be32 mask = ifa->ifa_mask;
	__be32 addr = ifa->ifa_local;
	__be32 prefix = ifa->ifa_address & mask;

	if (ifa->ifa_flags & IFA_F_SECONDARY) {
		/* A secondary must have a primary covering its prefix. */
		prim = inet_ifa_byprefix(in_dev, prefix, mask);
		if (!prim) {
			pr_warn("%s: bug: prim == NULL\n", __func__);
			return;
		}
	}

	/* The /32 local route is added regardless of device state. */
	fib_magic(RTM_NEWROUTE, RTN_LOCAL, addr, 32, prim, 0);

	if (!(dev->flags & IFF_UP))
		return;

	/* Add broadcast address, if it is explicitly assigned. */
	if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF)) {
		fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32,
			  prim, 0);
		/* Drop any neighbour entry for the broadcast address. */
		arp_invalidate(dev, ifa->ifa_broadcast, false);
	}

	/* Primary address with a real subnet: add the prefix route
	 * (unless IFA_F_NOPREFIXROUTE suppresses it).
	 */
	if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
	    (prefix != addr || ifa->ifa_prefixlen < 32)) {
		if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
			fib_magic(RTM_NEWROUTE,
				  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
				  prefix, ifa->ifa_prefixlen, prim,
				  ifa->ifa_rt_priority);

		/* Add the network broadcast address, when it makes sense */
		if (ifa->ifa_prefixlen < 31) {
			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask,
				  32, prim, 0);
			arp_invalidate(dev, prefix | ~mask, false);
		}
	}
}
|
|
|
|
|
2018-05-27 23:09:57 +08:00
|
|
|
/**
 * fib_modify_prefix_metric - replace the prefix route metric for an address
 * @ifa: the interface address whose prefix route changes metric
 * @new_metric: the metric for the replacement route
 *
 * Inserts the prefix route with @new_metric before deleting the old one
 * (with ifa->ifa_rt_priority), so connectivity is not lost in between.
 * No-op for down devices, secondary/no-prefix-route addresses, zeronet
 * prefixes, or bare /32 host addresses that have no prefix route.
 */
void fib_modify_prefix_metric(struct in_ifaddr *ifa, u32 new_metric)
{
	__be32 prefix = ifa->ifa_address & ifa->ifa_mask;
	struct in_device *in_dev = ifa->ifa_dev;
	struct net_device *dev = in_dev->dev;

	if (!(dev->flags & IFF_UP) ||
	    ifa->ifa_flags & (IFA_F_SECONDARY | IFA_F_NOPREFIXROUTE) ||
	    ipv4_is_zeronet(prefix) ||
	    (prefix == ifa->ifa_local && ifa->ifa_prefixlen == 32))
		return;

	/* add the new */
	fib_magic(RTM_NEWROUTE,
		  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
		  prefix, ifa->ifa_prefixlen, ifa, new_metric);

	/* delete the old */
	fib_magic(RTM_DELROUTE,
		  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
		  prefix, ifa->ifa_prefixlen, ifa, ifa->ifa_rt_priority);
}
|
|
|
|
|
2011-03-19 20:13:49 +08:00
|
|
|
/* Delete primary or secondary address.
|
|
|
|
* Optionally, on secondary address promotion consider the addresses
|
|
|
|
* from subnet iprim as deleted, even if they are in device list.
|
|
|
|
* In this case the secondary ifa can be in device list.
|
|
|
|
*/
|
|
|
|
/**
 * fib_del_ifaddr - remove the kernel routes implied by a deleted address
 * @ifa: the address being deleted
 * @iprim: on secondary promotion, the primary whose subnet addresses are
 *         to be treated as already deleted; NULL otherwise
 *
 * Removal is conservative: before deleting local/broadcast routes the
 * remaining address list is scanned, and any route still justified by
 * another address (same prefsrc) is kept (tracked via the ok bitmask).
 */
void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct net_device *dev = in_dev->dev;
	struct in_ifaddr *ifa1;
	struct in_ifaddr *prim = ifa, *prim1 = NULL;
	__be32 brd = ifa->ifa_address | ~ifa->ifa_mask;	/* subnet broadcast */
	__be32 any = ifa->ifa_address & ifa->ifa_mask;	/* subnet base */
/* Bits of @ok: which routes are still justified by surviving addresses. */
#define LOCAL_OK	1
#define BRD_OK		2
#define BRD0_OK		4
#define BRD1_OK		8
	unsigned int ok = 0;
	int subnet = 0;		/* Primary network */
	int gone = 1;		/* Address is missing */
	int same_prefsrc = 0;	/* Another primary with same IP */

	if (ifa->ifa_flags & IFA_F_SECONDARY) {
		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
		if (!prim) {
			/* if the device has been deleted, we don't perform
			 * address promotion
			 */
			if (!in_dev->dead)
				pr_warn("%s: bug: prim == NULL\n", __func__);
			return;
		}
		if (iprim && iprim != prim) {
			pr_warn("%s: bug: iprim != prim\n", __func__);
			return;
		}
	} else if (!ipv4_is_zeronet(any) &&
		   (any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) {
		/* Primary with a real subnet: drop its prefix route. */
		if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
			fib_magic(RTM_DELROUTE,
				  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
				  any, ifa->ifa_prefixlen, prim, 0);
		subnet = 1;
	}

	if (in_dev->dead)
		goto no_promotions;

	/* Deletion is more complicated than add.
	 * We should take care of not to delete too much :-)
	 *
	 * Scan address list to be sure that addresses are really gone.
	 */
	rcu_read_lock();
	in_dev_for_each_ifa_rcu(ifa1, in_dev) {
		if (ifa1 == ifa) {
			/* promotion, keep the IP */
			gone = 0;
			continue;
		}
		/* Ignore IFAs from our subnet */
		if (iprim && ifa1->ifa_mask == iprim->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, iprim))
			continue;

		/* Ignore ifa1 if it uses different primary IP (prefsrc) */
		if (ifa1->ifa_flags & IFA_F_SECONDARY) {
			/* Another address from our subnet? */
			if (ifa1->ifa_mask == prim->ifa_mask &&
			    inet_ifa_match(ifa1->ifa_address, prim))
				prim1 = prim;
			else {
				/* We reached the secondaries, so
				 * same_prefsrc should be determined.
				 */
				if (!same_prefsrc)
					continue;
				/* Search new prim1 if ifa1 is not
				 * using the current prim1
				 */
				if (!prim1 ||
				    ifa1->ifa_mask != prim1->ifa_mask ||
				    !inet_ifa_match(ifa1->ifa_address, prim1))
					prim1 = inet_ifa_byprefix(in_dev,
							ifa1->ifa_address,
							ifa1->ifa_mask);
				if (!prim1)
					continue;
				if (prim1->ifa_local != prim->ifa_local)
					continue;
			}
		} else {
			if (prim->ifa_local != ifa1->ifa_local)
				continue;
			prim1 = ifa1;
			if (prim != prim1)
				same_prefsrc = 1;
		}
		/* ifa1 shares our prefsrc: record which of our routes it
		 * still justifies.
		 */
		if (ifa->ifa_local == ifa1->ifa_local)
			ok |= LOCAL_OK;
		if (ifa->ifa_broadcast == ifa1->ifa_broadcast)
			ok |= BRD_OK;
		if (brd == ifa1->ifa_broadcast)
			ok |= BRD1_OK;
		if (any == ifa1->ifa_broadcast)
			ok |= BRD0_OK;
		/* primary has network specific broadcasts */
		if (prim1 == ifa1 && ifa1->ifa_prefixlen < 31) {
			__be32 brd1 = ifa1->ifa_address | ~ifa1->ifa_mask;
			__be32 any1 = ifa1->ifa_address & ifa1->ifa_mask;

			if (!ipv4_is_zeronet(any1)) {
				if (ifa->ifa_broadcast == brd1 ||
				    ifa->ifa_broadcast == any1)
					ok |= BRD_OK;
				if (brd == brd1 || brd == any1)
					ok |= BRD1_OK;
				if (any == brd1 || any == any1)
					ok |= BRD0_OK;
			}
		}
	}
	rcu_read_unlock();

no_promotions:
	/* Delete only the routes not vouched for by surviving addresses. */
	if (!(ok & BRD_OK))
		fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32,
			  prim, 0);
	if (subnet && ifa->ifa_prefixlen < 31) {
		if (!(ok & BRD1_OK))
			fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32,
				  prim, 0);
		if (!(ok & BRD0_OK))
			fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32,
				  prim, 0);
	}
	if (!(ok & LOCAL_OK)) {
		unsigned int addr_type;

		fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim, 0);

		/* Check, that this local address finally disappeared. */
		addr_type = inet_addr_type_dev_table(dev_net(dev), dev,
						     ifa->ifa_local);
		if (gone && addr_type != RTN_LOCAL) {
			/* And the last, but not the least thing.
			 * We must flush stray FIB entries.
			 *
			 * First of all, we scan fib_info list searching
			 * for stray nexthop entries, then ignite fib_flush.
			 */
			if (fib_sync_down_addr(dev, ifa->ifa_local))
				fib_flush(dev_net(dev));
		}
	}
#undef LOCAL_OK
#undef BRD_OK
#undef BRD0_OK
#undef BRD1_OK
}
|
|
|
|
|
2015-01-01 02:56:24 +08:00
|
|
|
/* Perform a FIB lookup on behalf of a NETLINK_FIB_LOOKUP request.
 * The request/reply structure @frn is filled in place: on success the
 * result fields are populated, otherwise frn->err carries the error
 * (-ENOENT when the requested table does not exist).
 */
static void nl_fib_lookup(struct net *net, struct fib_result_nl *frn)
{

	struct fib_result res;
	/* Build the lookup key from the request fields. */
	struct flowi4 fl4 = {
		.flowi4_mark = frn->fl_mark,
		.daddr = frn->fl_addr,
		.flowi4_tos = frn->fl_tos,
		.flowi4_scope = frn->fl_scope,
	};
	struct fib_table *tb;

	rcu_read_lock();

	tb = fib_get_table(net, frn->tb_id_in);

	frn->err = -ENOENT;
	if (tb) {
		/* fib_table_lookup() must run with BHs disabled. */
		local_bh_disable();

		frn->tb_id = tb->tb_id;
		/* FIB_LOOKUP_NOREF: result is only read under RCU here. */
		frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);

		if (!frn->err) {
			frn->prefixlen = res.prefixlen;
			frn->nh_sel = res.nh_sel;
			frn->type = res.type;
			frn->scope = res.scope;
		}
		local_bh_enable();
	}

	rcu_read_unlock();
}
|
|
|
|
|
2007-10-11 12:32:39 +08:00
|
|
|
/* Input callback of the NETLINK_FIB_LOOKUP kernel socket: validate the
 * request, run the lookup, and unicast the result back to the sender by
 * rewriting the (cloned) request skb in place.
 */
static void nl_fib_input(struct sk_buff *skb)
{
	struct net *net;
	struct fib_result_nl *frn;
	struct nlmsghdr *nlh;
	u32 portid;

	net = sock_net(skb->sk);
	nlh = nlmsg_hdr(skb);
	/* Reject truncated or undersized requests. */
	if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
	    skb->len < nlh->nlmsg_len ||
	    nlmsg_len(nlh) < sizeof(*frn))
		return;

	/* Clone so the payload can be rewritten as the reply. */
	skb = netlink_skb_clone(skb, GFP_KERNEL);
	if (!skb)
		return;
	nlh = nlmsg_hdr(skb);

	frn = nlmsg_data(nlh);
	nl_fib_lookup(net, frn);

	portid = NETLINK_CB(skb).portid;      /* netlink portid */
	NETLINK_CB(skb).portid = 0;        /* from kernel */
	NETLINK_CB(skb).dst_group = 0;  /* unicast */
	/* nlmsg_unicast() consumes the skb on both success and failure. */
	nlmsg_unicast(net->ipv4.fibnl, skb, portid);
}
|
2005-06-21 04:36:39 +08:00
|
|
|
|
2010-01-17 11:35:32 +08:00
|
|
|
/* Per-netns init: create the NETLINK_FIB_LOOKUP kernel socket whose input
 * handler (nl_fib_input) services userspace FIB lookup requests.
 * Returns 0 on success, -EAFNOSUPPORT if the socket cannot be created.
 */
static int __net_init nl_fib_lookup_init(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.input	= nl_fib_input,
	};
	struct sock *sock;

	sock = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
	if (!sock)
		return -EAFNOSUPPORT;

	net->ipv4.fibnl = sock;
	return 0;
}
|
|
|
|
|
|
|
|
static void nl_fib_lookup_exit(struct net *net)
|
|
|
|
{
|
2008-01-29 06:41:19 +08:00
|
|
|
netlink_kernel_release(net->ipv4.fibnl);
|
2008-01-19 15:55:19 +08:00
|
|
|
net->ipv4.fibnl = NULL;
|
2005-06-21 04:36:39 +08:00
|
|
|
}
|
|
|
|
|
2015-10-30 16:23:33 +08:00
|
|
|
static void fib_disable_ip(struct net_device *dev, unsigned long event,
|
|
|
|
bool force)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2015-10-30 16:23:33 +08:00
|
|
|
if (fib_sync_down_dev(dev, event, force))
|
2008-03-25 20:47:49 +08:00
|
|
|
fib_flush(dev_net(dev));
|
2017-04-26 19:04:04 +08:00
|
|
|
else
|
|
|
|
rt_cache_flush(dev_net(dev));
|
2005-04-17 06:20:36 +08:00
|
|
|
arp_ifdown(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* inetaddr notifier: keep the FIB in sync when an IPv4 address is added
 * to or removed from a device. @ptr is the struct in_ifaddr involved.
 */
static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct net *net = dev_net(dev);

	switch (event) {
	case NETDEV_UP:
		fib_add_ifaddr(ifa);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		/* A new address may revive dead multipath nexthops. */
		fib_sync_up(dev, RTNH_F_DEAD);
#endif
		/* Invalidate cached source-address selections. */
		atomic_inc(&net->ipv4.dev_addr_genid);
		rt_cache_flush(dev_net(dev));
		break;
	case NETDEV_DOWN:
		fib_del_ifaddr(ifa, NULL);
		atomic_inc(&net->ipv4.dev_addr_genid);
		if (!ifa->ifa_dev->ifa_list) {
			/* Last address was deleted from this interface.
			 * Disable IP.
			 */
			fib_disable_ip(dev, event, true);
		} else {
			rt_cache_flush(dev_net(dev));
		}
		break;
	}
	return NOTIFY_DONE;
}
|
|
|
|
|
|
|
|
/* netdevice notifier: keep the FIB in sync with device state changes.
 * NOTE: @ptr is reinterpreted per event type — a plain notifier_info for
 * most events, notifier_info_ext for NETDEV_CHANGEMTU (carries the new
 * MTU), and changeupper_info for NETDEV_CHANGEUPPER.
 */
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *upper_info = ptr;
	struct netdev_notifier_info_ext *info_ext = ptr;
	struct in_device *in_dev;
	struct net *net = dev_net(dev);
	struct in_ifaddr *ifa;
	unsigned int flags;

	if (event == NETDEV_UNREGISTER) {
		/* Device is going away: drop everything referencing it. */
		fib_disable_ip(dev, event, true);
		rt_flush_dev(dev);
		return NOTIFY_DONE;
	}

	/* Nothing to do for devices without IPv4 configuration. */
	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		/* Re-add routes for every address on the device. */
		in_dev_for_each_ifa_rtnl(ifa, in_dev) {
			fib_add_ifaddr(ifa);
		}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		fib_sync_up(dev, RTNH_F_DEAD);
#endif
		atomic_inc(&net->ipv4.dev_addr_genid);
		rt_cache_flush(net);
		break;
	case NETDEV_DOWN:
		fib_disable_ip(dev, event, false);
		break;
	case NETDEV_CHANGE:
		/* Carrier change: toggle RTNH_F_LINKDOWN on nexthops. */
		flags = dev_get_flags(dev);
		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			fib_sync_up(dev, RTNH_F_LINKDOWN);
		else
			fib_sync_down_dev(dev, event, false);
		rt_cache_flush(net);
		break;
	case NETDEV_CHANGEMTU:
		/* Refresh cached exception PMTUs against the old MTU. */
		fib_sync_mtu(dev, info_ext->ext.mtu);
		rt_cache_flush(net);
		break;
	case NETDEV_CHANGEUPPER:
		upper_info = ptr;
		/* flush all routes if dev is linked to or unlinked from
		 * an L3 master device (e.g., VRF)
		 */
		if (upper_info->upper_dev &&
		    netif_is_l3_master(upper_info->upper_dev))
			fib_disable_ip(dev, NETDEV_DOWN, true);
		break;
	}
	return NOTIFY_DONE;
}
|
|
|
|
|
|
|
|
/* Notifier hooked into the inetaddr chain: keeps the FIB in sync when
 * IPv4 addresses are added to or removed from an interface.
 */
static struct notifier_block fib_inetaddr_notifier = {
	.notifier_call = fib_inetaddr_event,
};
|
|
|
|
|
|
|
|
/* Notifier hooked into the netdevice chain: reacts to device state
 * changes (up/down, unregister, MTU, upper-device changes) so routes
 * referencing the device can be updated or flushed.
 */
static struct notifier_block fib_netdev_notifier = {
	.notifier_call = fib_netdev_event,
};
|
|
|
|
|
2008-01-10 19:22:17 +08:00
|
|
|
/* Set up the per-netns IPv4 FIB state: the FIB notifier chain, the
 * table hash and the policy-routing rules.  On failure, everything
 * already initialized is torn down again before returning the error.
 */
static int __net_init ip_fib_net_init(struct net *net)
{
	size_t hash_sz = sizeof(struct hlist_head) * FIB_TABLE_HASHSZ;
	int ret;

	ret = fib4_notifier_init(net);
	if (ret)
		return ret;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	/* Default to 3-tuple */
	net->ipv4.sysctl_fib_multipath_hash_fields =
		FIB_MULTIPATH_HASH_FIELD_DEFAULT_MASK;
#endif

	/* Avoid false sharing : Use at least a full cache line */
	hash_sz = max_t(size_t, hash_sz, L1_CACHE_BYTES);

	net->ipv4.fib_table_hash = kzalloc(hash_sz, GFP_KERNEL);
	if (!net->ipv4.fib_table_hash) {
		ret = -ENOMEM;
		goto free_notifier;
	}

	ret = fib4_rules_init(net);
	if (ret < 0)
		goto free_hash;

	return 0;

free_hash:
	kfree(net->ipv4.fib_table_hash);
free_notifier:
	fib4_notifier_exit(net);
	return ret;
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-01-17 11:35:32 +08:00
|
|
|
/* Tear down the per-netns IPv4 FIB state built by ip_fib_net_init().
 * Caller must hold RTNL (enforced by ASSERT_RTNL below).  The teardown
 * order is deliberate and must not be changed.
 */
static void ip_fib_net_exit(struct net *net)
{
	int i;

	ASSERT_RTNL();
#ifdef CONFIG_IP_MULTIPLE_TABLES
	/* Clear the cached main/default table pointers first so no new
	 * RCU readers can pick up a table we are about to free.
	 */
	RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
	RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
#endif
	/* Destroy the tables in reverse order to guarantee that the
	 * local table, ID 255, is destroyed before the main table, ID
	 * 254. This is necessary as the local table may contain
	 * references to data contained in the main table.
	 */
	for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[i];
		struct hlist_node *tmp;
		struct fib_table *tb;

		/* _safe variant: entries are unlinked while iterating */
		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
			hlist_del(&tb->tb_hlist);
			fib_table_flush(net, tb, true);
			fib_free_table(tb);
		}
	}

#ifdef CONFIG_IP_MULTIPLE_TABLES
	fib4_rules_exit(net);
#endif

	kfree(net->ipv4.fib_table_hash);
	fib4_notifier_exit(net);
}
|
|
|
|
|
|
|
|
/* Per-netns init callback: brings up the FIB core, the netlink FIB
 * lookup socket and the /proc entries.  Unwinds in reverse order on
 * failure; ip_fib_net_exit() requires RTNL, hence the lock around it.
 */
static int __net_init fib_net_init(struct net *net)
{
	int error;

#ifdef CONFIG_IP_ROUTE_CLASSID
	atomic_set(&net->ipv4.fib_num_tclassid_users, 0);
#endif
	error = ip_fib_net_init(net);
	if (error < 0)
		return error;

	error = nl_fib_lookup_init(net);
	if (error < 0)
		goto undo_fib;

	error = fib_proc_init(net);
	if (error < 0)
		goto undo_nlfl;

	return 0;

undo_nlfl:
	nl_fib_lookup_exit(net);
undo_fib:
	rtnl_lock();
	ip_fib_net_exit(net);
	rtnl_unlock();
	return error;
}
|
|
|
|
|
|
|
|
/* Per-netns exit callback: removes the /proc entries and the netlink
 * FIB lookup socket.  The FIB tables themselves are torn down later in
 * fib_net_exit_batch(), under a single RTNL acquisition for the batch.
 */
static void __net_exit fib_net_exit(struct net *net)
{
	fib_proc_exit(net);
	nl_fib_lookup_exit(net);
}
|
|
|
|
|
|
|
|
/* Batched per-netns exit: destroys the FIB state of every dying netns
 * on the list while holding RTNL once, instead of locking per netns.
 */
static void __net_exit fib_net_exit_batch(struct list_head *net_list)
{
	struct net *cur;

	rtnl_lock();

	list_for_each_entry(cur, net_list, exit_list) {
		ip_fib_net_exit(cur);
	}

	rtnl_unlock();
}
|
|
|
|
|
|
|
|
/* Pernet operations for the IPv4 FIB: .exit runs per netns (proc and
 * netlink cleanup), .exit_batch destroys the tables for a whole batch
 * of namespaces under one RTNL hold.
 */
static struct pernet_operations fib_net_ops = {
	.init = fib_net_init,
	.exit = fib_net_exit,
	.exit_batch = fib_net_exit_batch,
};
|
|
|
|
|
|
|
|
/* Boot-time initialization of the IPv4 FIB subsystem: sets up the trie
 * allocator, registers the pernet operations, the device/address
 * notifiers and the RTM_{NEW,DEL,GET}ROUTE rtnetlink handlers.
 * NOTE(review): registration order (pernet subsys before notifiers)
 * looks deliberate; do not reorder without checking init dependencies.
 */
void __init ip_fib_init(void)
{
	fib_trie_init();

	register_pernet_subsys(&fib_net_ops);

	register_netdevice_notifier(&fib_netdev_notifier);
	register_inetaddr_notifier(&fib_inetaddr_notifier);

	/* doit handlers for route add/del; dumpit only for GETROUTE */
	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0);
	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0);
	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, 0);
}
|