2005-04-17 06:20:36 +08:00
|
|
|
#ifndef __LINUX_RTNETLINK_H
|
|
|
|
#define __LINUX_RTNETLINK_H
|
|
|
|
|
|
|
|
|
2006-03-21 14:23:58 +08:00
|
|
|
#include <linux/mutex.h>
|
2010-11-15 14:01:59 +08:00
|
|
|
#include <linux/netdevice.h>
|
2014-05-13 06:11:20 +08:00
|
|
|
#include <linux/wait.h>
|
2012-10-13 17:46:48 +08:00
|
|
|
#include <uapi/linux/rtnetlink.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-11-20 14:26:51 +08:00
|
|
|
/* Multicast @skb to rtnetlink @group in @net; non-zero @echo also reports
 * back to the sending socket identified by @pid.
 * NOTE(review): skb ownership/consumption semantics are implemented in
 * net/core/rtnetlink.c — confirm there before relying on them.
 */
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
/* Unicast @skb over the rtnetlink socket of @net to netlink port id @pid. */
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
/* Deliver @skb as a notification to multicast @group of @net; @nlh and
 * @flags (GFP context) qualify the delivery. */
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
			u32 group, struct nlmsghdr *nlh, gfp_t flags);
/* Record error @error on the rtnetlink socket for listeners of @group. */
extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
/* Append a metrics attribute set built from the @metrics array to @skb. */
extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
/* Append cache-info (id/expiry/error, plus data taken from @dst) to @skb. */
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
			      u32 id, long expires, u32 error);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-10-24 07:02:42 +08:00
|
|
|
/* Build and send an RTM_* link (ifinfo) notification for @dev in one call. */
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
/* Build the ifinfo notification skb for @dev without sending it; pair with
 * rtmsg_ifinfo_send() below. */
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
				       unsigned change, gfp_t flags);
/* Send an skb previously produced by rtmsg_ifinfo_build_skb() for @dev. */
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
		       gfp_t flags);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-03-21 14:23:58 +08:00
|
|
|
/* RTNL is used as a global lock for all changes to network configuration */
extern void rtnl_lock(void);
extern void rtnl_unlock(void);
/* Non-blocking acquire; presumably follows the mutex_trylock convention of
 * returning non-zero on success — verify against net/core/rtnetlink.c. */
extern int rtnl_trylock(void);
/* Non-zero while RTNL is held (by any task); used by ASSERT_RTNL() below. */
extern int rtnl_is_locked(void);

/* NOTE(review): waiters/uses of these live outside this header — the wait
 * queue appears tied to device unregistration, net_mutex to per-netns
 * setup/teardown; confirm in net/core/. */
extern wait_queue_head_t netdev_unregistering_wq;
extern struct mutex net_mutex;
|
|
|
|
|
2010-02-23 09:04:49 +08:00
|
|
|
#ifdef CONFIG_PROVE_LOCKING
extern int lockdep_rtnl_is_held(void);
#else
/* Without lockdep there is nothing to track: report "held" so that the
 * rcu_dereference_*() debug conditions below always pass. */
static inline int lockdep_rtnl_is_held(void)
{
	return 1;
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
|
2006-03-21 14:23:58 +08:00
|
|
|
|
2010-09-09 05:15:32 +08:00
|
|
|
/**
 * rcu_dereference_rtnl - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 *
 * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
 * or RTNL. Note: Please prefer rtnl_dereference() or rcu_dereference()
 * when only one of the two protections applies.
 */
#define rcu_dereference_rtnl(p)					\
	rcu_dereference_check(p, lockdep_rtnl_is_held())
|
2010-09-09 05:15:32 +08:00
|
|
|
|
2014-09-13 11:08:20 +08:00
|
|
|
/**
 * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereference
 *
 * Do an rcu_dereference_bh(p), but check caller either holds rcu_read_lock_bh()
 * or RTNL. Note: Please prefer rtnl_dereference() or rcu_dereference_bh()
 * when only one of the two protections applies.
 */
#define rcu_dereference_bh_rtnl(p)				\
	rcu_dereference_bh_check(p, lockdep_rtnl_is_held())
|
|
|
|
|
2010-09-15 19:07:15 +08:00
|
|
|
/**
 * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
 * @p: The pointer to read, prior to dereferencing
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
 * caller holds RTNL.
 */
#define rtnl_dereference(p)					\
	rcu_dereference_protected(p, lockdep_rtnl_is_held())
|
2010-09-15 19:07:15 +08:00
|
|
|
|
2010-10-02 14:11:55 +08:00
|
|
|
static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
|
|
|
|
{
|
|
|
|
return rtnl_dereference(dev->ingress_queue);
|
|
|
|
}
|
|
|
|
|
/* Historical note (from "net: use jump label patching for ingress qdisc in
 * __netif_receive_skb_core"): rather than paying a per-packet cost in
 * handle_ing() only to find nothing attached, ingress qdisc processing is
 * blinded out as a no-op via a static key, matching other input fast-path
 * uses of static keys (skb time stamping, RPS, etc.). The code path is
 * enabled/disabled through net_{inc,dec}_ingress_queue(), invoked under the
 * RTNL mutex when an ingress qdisc is initialized or destroyed.
 * (Daniel Borkmann; acked by Alexei Starovoitov; applied by David S. Miller,
 * 2015-04-11.)
 */
|
|
|
/* Create dev->ingress_queue for @dev — NOTE(review): allocation/idempotence
 * semantics live in net/core/dev.c; confirm there. */
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);

#ifdef CONFIG_NET_INGRESS
/* Enable/disable the static key that gates ingress qdisc processing in
 * __netif_receive_skb_core(); called under RTNL when an ingress qdisc is
 * initialized or destroyed. */
void net_inc_ingress_queue(void);
void net_dec_ingress_queue(void);
#endif
|
2010-10-02 14:11:55 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* One-time initialization of the rtnetlink subsystem. */
extern void rtnetlink_init(void);
/* Internal (double-underscore) unlock variant — NOTE(review): differs from
 * rtnl_unlock() in what deferred work it runs; see net/core/rtnetlink.c. */
extern void __rtnl_unlock(void);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Complain loudly — error printk with file/line plus a stack dump — if RTNL
 * is not held at this point. Purely diagnostic; execution continues. */
#define ASSERT_RTNL() do { \
	if (unlikely(!rtnl_is_locked())) { \
		printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
		       __FILE__,  __LINE__); \
		dump_stack(); \
	} \
} while(0)
|
|
|
|
|
2012-04-15 14:43:56 +08:00
|
|
|
/* Default implementations of the netdev_ops FDB / bridge-getlink hooks,
 * usable by drivers without a hardware-specific handler.
 * NOTE(review): exact dump/add/del semantics (which address lists are
 * walked, @idx continuation, @vid handling) are in net/core/rtnetlink.c.
 */
extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct net_device *dev,
			     struct net_device *filter_dev,
			     int idx);
extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
			    struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr,
			    u16 vid,
			    u16 flags);
extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
			    struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr,
			    u16 vid);

/* Fill an RTM_GETLINK bridge reply for @dev (mode/flags/mask per
 * IFLA_BRIDGE_* attributes; @nlflags are netlink message flags). */
extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev, u16 mode,
				   u32 flags, u32 mask, int nlflags);
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif /* __LINUX_RTNETLINK_H */
|