#ifndef __LINUX_RTNETLINK_H
#define __LINUX_RTNETLINK_H

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
			u32 group, struct nlmsghdr *nlh, gfp_t flags);
extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
			      u32 id, long expires, u32 error);

void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
				       unsigned change, gfp_t flags);
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
		       gfp_t flags);
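/*
 * Illustrative sketch (not part of the API itself; the change mask below is
 * arbitrary): the split between rtmsg_ifinfo_build_skb() and
 * rtmsg_ifinfo_send() lets a caller build the RTM_NEWLINK notification once
 * and decide separately when to broadcast it. A minimal use, roughly how
 * rtmsg_ifinfo() composes the two, might look like:
 *
 *	struct sk_buff *skb;
 *
 *	skb = rtmsg_ifinfo_build_skb(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
 *	if (skb)
 *		rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
 */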
/* RTNL is used as a global lock for all changes to network configuration */
extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
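/*
 * Illustrative sketch (foo_change_mtu() and "foo_dev" are hypothetical):
 * control paths take the RTNL mutex around the whole read-modify-write of
 * network configuration, while contexts that cannot simply block on the
 * mutex can use rtnl_trylock() and back off, e.g. with the common
 * restart_syscall() pattern:
 *
 *	rtnl_lock();
 *	foo_change_mtu(foo_dev, 1500);	// config is stable while RTNL is held
 *	rtnl_unlock();
 *
 *	if (!rtnl_trylock())
 *		return restart_syscall();
 *	// ... reconfigure ...
 *	rtnl_unlock();
 */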
extern wait_queue_head_t netdev_unregistering_wq;
extern struct mutex net_mutex;

#ifdef CONFIG_PROVE_LOCKING
extern bool lockdep_rtnl_is_held(void);
#else
static inline bool lockdep_rtnl_is_held(void)
{
	return true;
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
/**
 * rcu_dereference_rtnl - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 *
 * Do an rcu_dereference(p), but check that the caller either holds
 * rcu_read_lock() or RTNL. Note: please prefer rtnl_dereference() or
 * rcu_dereference().
 */
#define rcu_dereference_rtnl(p)					\
	rcu_dereference_check(p, lockdep_rtnl_is_held())
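/*
 * Illustrative sketch (foo_peek_ingress() is hypothetical): a reader that may
 * be called either from an RCU read-side critical section or from an
 * RTNL-protected control path can use rcu_dereference_rtnl() so that the
 * lockdep check accepts both contexts:
 *
 *	static struct netdev_queue *foo_peek_ingress(struct net_device *dev)
 *	{
 *		return rcu_dereference_rtnl(dev->ingress_queue);
 *	}
 */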
/**
 * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 *
 * Do an rcu_dereference_bh(p), but check that the caller either holds
 * rcu_read_lock_bh() or RTNL. Note: please prefer rtnl_dereference() or
 * rcu_dereference_bh().
 */
#define rcu_dereference_bh_rtnl(p)			\
	rcu_dereference_bh_check(p, lockdep_rtnl_is_held())
/**
 * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
 * @p: The pointer to read, prior to dereferencing
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
 * the caller holds RTNL.
 */
#define rtnl_dereference(p)					\
	rcu_dereference_protected(p, lockdep_rtnl_is_held())
static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
{
	return rtnl_dereference(dev->ingress_queue);
}
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
#ifdef CONFIG_NET_INGRESS
void net_inc_ingress_queue(void);
void net_dec_ingress_queue(void);
#endif
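/*
 * Illustrative sketch (foo_ingress_init/destroy are hypothetical, loosely
 * modelled on an ingress qdisc): the inc/dec helpers are meant to be called
 * under RTNL when an ingress qdisc is created or destroyed, so the ingress
 * hook in the receive fast path is only enabled while at least one ingress
 * qdisc exists:
 *
 *	static int foo_ingress_init(struct Qdisc *sch, struct nlattr *opt)
 *	{
 *		ASSERT_RTNL();
 *		net_inc_ingress_queue();
 *		return 0;
 *	}
 *
 *	static void foo_ingress_destroy(struct Qdisc *sch)
 *	{
 *		net_dec_ingress_queue();
 *	}
 */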
#ifdef CONFIG_NET_EGRESS
void net_inc_egress_queue(void);
void net_dec_egress_queue(void);
#endif
void rtnetlink_init(void);
void __rtnl_unlock(void);
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
#define ASSERT_RTNL() do { \
	if (unlikely(!rtnl_is_locked())) { \
		printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
		       __FILE__,  __LINE__); \
		dump_stack(); \
	} \
} while(0)
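/*
 * Illustrative sketch (foo_set_mtu() is hypothetical): helpers that are only
 * safe while the RTNL mutex is held can document and enforce that assumption
 * with ASSERT_RTNL(), which logs the file and line and dumps a stack trace
 * when the lock is not actually held:
 *
 *	static void foo_set_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		ASSERT_RTNL();
 *		dev->mtu = new_mtu;
 *	}
 */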
extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct net_device *dev,
			     struct net_device *filter_dev,
			     int *idx);
extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
			    struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr,
			    u16 vid,
			    u16 flags);
extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
			    struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr,
			    u16 vid);
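/*
 * Illustrative sketch (foo_ndo_fdb_add() is hypothetical): a driver that only
 * needs the default software FDB behaviour for unicast entries can check its
 * own constraints and then defer to ndo_dflt_fdb_add(), which operates on the
 * device's address lists:
 *
 *	static int foo_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 *				   struct net_device *dev,
 *				   const unsigned char *addr, u16 vid,
 *				   u16 flags)
 *	{
 *		if (is_multicast_ether_addr(addr))
 *			return -EINVAL;
 *		return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
 *	}
 */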
extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev, u16 mode,
				   u32 flags, u32 mask, int nlflags,
				   u32 filter_mask,
				   int (*vlan_fill)(struct sk_buff *skb,
						    struct net_device *dev,
						    u32 filter_mask));
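/*
 * Illustrative sketch (foo_bridge_getlink(), struct foo_priv and its
 * bridge_mode field are all hypothetical): a driver that tracks only the
 * hardware bridge mode can let ndo_dflt_bridge_getlink() build the standard
 * bridge-mode reply for it, passing 0 for the optional flag/mask arguments
 * and NULL when it has no per-VLAN information to fill in:
 *
 *	static int foo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 *				      struct net_device *dev,
 *				      u32 filter_mask, int nlflags)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
 *					       priv->bridge_mode, 0, 0,
 *					       nlflags, filter_mask, NULL);
 *	}
 */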
#endif	/* __LINUX_RTNETLINK_H */