2005-04-17 06:20:36 +08:00
|
|
|
#ifndef __LINUX_NETFILTER_H
|
|
|
|
#define __LINUX_NETFILTER_H
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/net.h>
|
|
|
|
#include <linux/if.h>
|
2008-01-15 15:40:34 +08:00
|
|
|
#include <linux/in.h>
|
|
|
|
#include <linux/in6.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/wait.h>
|
|
|
|
#include <linux/list.h>
|
2014-08-22 10:40:15 +08:00
|
|
|
#include <linux/static_key.h>
|
2015-06-17 23:28:27 +08:00
|
|
|
#include <linux/netfilter_defs.h>
|
2015-07-11 07:15:06 +08:00
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <net/net_namespace.h>
|
2015-06-17 23:28:27 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#ifdef CONFIG_NETFILTER
|
2011-01-18 22:52:14 +08:00
|
|
|
/* Recover the (negative) errno value embedded in a drop verdict: the
 * error code lives in the bits above the NF_VERDICT_QBITS low bits
 * that hold the verdict/queue number. */
static inline int NF_DROP_GETERR(int verdict)
{
	return -(verdict >> NF_VERDICT_QBITS);
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-03-26 11:09:33 +08:00
|
|
|
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
|
|
|
|
const union nf_inet_addr *a2)
|
|
|
|
{
|
|
|
|
return a1->all[0] == a2->all[0] &&
|
|
|
|
a1->all[1] == a2->all[1] &&
|
|
|
|
a1->all[2] == a2->all[2] &&
|
|
|
|
a1->all[3] == a2->all[3];
|
|
|
|
}
|
|
|
|
|
2012-05-18 04:08:57 +08:00
|
|
|
static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
|
|
|
|
union nf_inet_addr *result,
|
|
|
|
const union nf_inet_addr *mask)
|
|
|
|
{
|
|
|
|
result->all[0] = a1->all[0] & mask->all[0];
|
|
|
|
result->all[1] = a1->all[1] & mask->all[1];
|
|
|
|
result->all[2] = a1->all[2] & mask->all[2];
|
|
|
|
result->all[3] = a1->all[3] & mask->all[3];
|
|
|
|
}
|
|
|
|
|
2013-09-27 05:48:15 +08:00
|
|
|
int netfilter_init(void);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
struct sk_buff;
|
|
|
|
|
2013-10-10 15:21:55 +08:00
|
|
|
struct nf_hook_ops;
|
2015-04-04 04:23:58 +08:00
|
|
|
|
2015-04-06 10:19:00 +08:00
|
|
|
struct sock;
|
|
|
|
|
2015-04-04 04:23:58 +08:00
|
|
|
/*
 * Per-invocation context handed to the netfilter core and each hook:
 * which hook point fired, for which protocol family, the devices the
 * packet is traversing, and the continuation to run on acceptance.
 */
struct nf_hook_state {
	unsigned int hook;		/* hook number within the family */
	int thresh;			/* priority threshold for hook traversal */
	u_int8_t pf;			/* protocol family (NFPROTO_*) */
	struct net_device *in;		/* input device; may be NULL (see nf_hook_thresh) */
	struct net_device *out;		/* output device; may be NULL */
	struct sock *sk;		/* associated socket, if any */
	struct list_head *hook_list;	/* list of nf_hook_ops to traverse */
	int (*okfn)(struct sock *, struct sk_buff *);	/* continuation on NF_ACCEPT */
};
|
|
|
|
|
2015-04-06 10:18:54 +08:00
|
|
|
/* Fill in a hook-state descriptor from the individual arguments.
 * Every field of struct nf_hook_state is assigned explicitly. */
static inline void nf_hook_state_init(struct nf_hook_state *p,
				      struct list_head *hook_list,
				      unsigned int hook,
				      int thresh, u_int8_t pf,
				      struct net_device *indev,
				      struct net_device *outdev,
				      struct sock *sk,
				      int (*okfn)(struct sock *, struct sk_buff *))
{
	*p = (struct nf_hook_state) {
		.hook		= hook,
		.thresh		= thresh,
		.pf		= pf,
		.in		= indev,
		.out		= outdev,
		.sk		= sk,
		.hook_list	= hook_list,
		.okfn		= okfn,
	};
}
|
|
|
|
|
2013-10-10 15:21:55 +08:00
|
|
|
typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
|
2007-10-15 15:53:15 +08:00
|
|
|
struct sk_buff *skb,
|
2015-04-04 08:32:56 +08:00
|
|
|
const struct nf_hook_state *state);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-11-05 01:50:58 +08:00
|
|
|
/*
 * Registration record for one netfilter hook callback.  Registered via
 * nf_register_hook()/nf_register_net_hook(); entries on a hook list are
 * kept sorted by ->priority.
 */
struct nf_hook_ops {
	struct list_head list;	/* linkage on the hook list; managed by the core */

	/* User fills in from here down. */
	nf_hookfn *hook;	/* callback invoked for each packet at this hook */
	struct net_device *dev;	/* device to bind to — TODO confirm: appears to be
				 * used by the netdev/ingress hook only */
	struct module *owner;	/* module providing the callback */
	void *priv;		/* opaque cookie passed through to the callback */
	u_int8_t pf;		/* protocol family (NFPROTO_*) */
	unsigned int hooknum;	/* hook point within the family */
	/* Hooks are ordered in ascending priority. */
	int priority;
};
|
|
|
|
|
2009-11-05 01:50:58 +08:00
|
|
|
/*
 * Registration record for a range of netfilter get/setsockopt option
 * numbers, registered via nf_register_sockopt().
 */
struct nf_sockopt_ops {
	struct list_head list;	/* linkage on the global sockopt list */

	u_int8_t pf;		/* protocol family this range applies to */

	/* Non-inclusive ranges: use 0/0/NULL to never get called. */
	int set_optmin;
	int set_optmax;
	int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
#ifdef CONFIG_COMPAT
	/* 32-bit-compat entry point for setsockopt */
	int (*compat_set)(struct sock *sk, int optval,
			  void __user *user, unsigned int len);
#endif
	int get_optmin;
	int get_optmax;
	int (*get)(struct sock *sk, int optval, void __user *user, int *len);
#ifdef CONFIG_COMPAT
	/* 32-bit-compat entry point for getsockopt */
	int (*compat_get)(struct sock *sk, int optval,
			  void __user *user, int *len);
#endif
	/* Use the module struct to lock set/get code in place */
	struct module *owner;	/* ref held while set/get run, so the module
				 * cannot be unloaded mid-call */
};
|
|
|
|
|
|
|
|
/* Function to register/unregister hook points. */
|
2015-07-11 07:15:06 +08:00
|
|
|
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
|
|
|
|
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
|
|
|
|
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
|
|
|
|
unsigned int n);
|
|
|
|
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
|
|
|
|
unsigned int n);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
int nf_register_hook(struct nf_hook_ops *reg);
|
|
|
|
void nf_unregister_hook(struct nf_hook_ops *reg);
|
2006-04-07 05:09:12 +08:00
|
|
|
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
|
|
|
|
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Functions to register get/setsockopt ranges (non-inclusive). You
|
|
|
|
need to check permissions yourself! */
|
|
|
|
int nf_register_sockopt(struct nf_sockopt_ops *reg);
|
|
|
|
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
|
|
|
|
|
2014-08-22 10:40:15 +08:00
|
|
|
#ifdef HAVE_JUMP_LABEL
|
2012-02-24 15:31:31 +08:00
|
|
|
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
|
2014-08-22 10:40:15 +08:00
|
|
|
|
2015-07-20 19:32:52 +08:00
|
|
|
/*
 * Fast test for "are any hooks registered at (pf, hook)?".  When both
 * indices are compile-time constants the per-hook static key turns the
 * common no-hooks case into a patched-out branch; otherwise fall back
 * to checking the list itself.
 */
static inline bool nf_hook_list_active(struct list_head *hook_list,
				       u_int8_t pf, unsigned int hook)
{
	if (__builtin_constant_p(pf) &&
	    __builtin_constant_p(hook))
		return static_key_false(&nf_hooks_needed[pf][hook]);

	return !list_empty(hook_list);
}
|
|
|
|
#else
|
2015-07-20 19:32:52 +08:00
|
|
|
/* No jump-label support: simply test whether the hook list is non-empty.
 * pf/hook are unused in this variant but kept for a uniform signature. */
static inline bool nf_hook_list_active(struct list_head *hook_list,
				       u_int8_t pf, unsigned int hook)
{
	return !list_empty(hook_list);
}
|
|
|
|
#endif
|
|
|
|
|
2015-04-04 04:23:58 +08:00
|
|
|
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
|
2006-01-07 15:01:48 +08:00
|
|
|
|
|
|
|
/**
 * nf_hook_thresh - call a netfilter hook
 * @pf: protocol family (NFPROTO_*)
 * @hook: hook number within the family
 * @sk: socket associated with the packet, if any
 * @skb: packet buffer being traversed
 * @indev: input device; may be NULL if @outdev is set
 * @outdev: output device; may be NULL if @indev is set
 * @okfn: continuation the caller runs if the packet is accepted
 * @thresh: only run hooks with priority >= @thresh
 *
 * Returns 1 if the hook has allowed the packet to pass. The function
 * okfn must be invoked by the caller in this case. Any other return
 * value indicates the packet has been consumed by the hook.
 */
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
				 struct sock *sk,
				 struct sk_buff *skb,
				 struct net_device *indev,
				 struct net_device *outdev,
				 int (*okfn)(struct sock *, struct sk_buff *),
				 int thresh)
{
	/* The netns is derived from whichever device is present, so at
	 * least one of indev/outdev must be non-NULL. */
	struct net *net = dev_net(indev ? indev : outdev);
	struct list_head *hook_list = &net->nf.hooks[pf][hook];

	if (nf_hook_list_active(hook_list, pf, hook)) {
		struct nf_hook_state state;

		nf_hook_state_init(&state, hook_list, hook, thresh,
				   pf, indev, outdev, sk, okfn);
		return nf_hook_slow(skb, &state);
	}
	/* No hooks registered: packet passes. */
	return 1;
}
|
|
|
|
|
2015-04-06 10:19:04 +08:00
|
|
|
/* Convenience wrapper around nf_hook_thresh() with no priority
 * threshold (INT_MIN means every registered hook runs). */
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
			  struct sk_buff *skb, struct net_device *indev,
			  struct net_device *outdev,
			  int (*okfn)(struct sock *, struct sk_buff *))
{
	return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN);
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Activate hook; either okfn or kfree_skb called, unless a hook
|
|
|
|
returns NF_STOLEN (in which case, it's up to the hook to deal with
|
|
|
|
the consequences).
|
|
|
|
|
|
|
|
Returns -ERRNO if packet dropped. Zero means queued, stolen or
|
|
|
|
accepted.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* RR:
|
|
|
|
> I don't want nf_hook to return anything because people might forget
|
|
|
|
> about async and trust the return value to mean "packet was ok".
|
|
|
|
|
|
|
|
AK:
|
|
|
|
Just document it clearly, then you can expect some sense from kernel
|
|
|
|
coders :)
|
|
|
|
*/
|
|
|
|
|
2009-06-13 10:13:26 +08:00
|
|
|
static inline int
|
2015-04-06 10:19:04 +08:00
|
|
|
NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk,
|
|
|
|
struct sk_buff *skb, struct net_device *in,
|
|
|
|
struct net_device *out,
|
|
|
|
int (*okfn)(struct sock *, struct sk_buff *), int thresh)
|
2009-06-13 10:13:26 +08:00
|
|
|
{
|
2015-04-06 10:19:04 +08:00
|
|
|
int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh);
|
2009-06-13 10:13:26 +08:00
|
|
|
if (ret == 1)
|
2015-04-06 10:19:04 +08:00
|
|
|
ret = okfn(sk, skb);
|
2009-06-13 10:13:26 +08:00
|
|
|
return ret;
|
|
|
|
}
|
2006-02-16 07:10:22 +08:00
|
|
|
|
2009-06-13 10:13:26 +08:00
|
|
|
static inline int
|
2015-04-06 10:19:04 +08:00
|
|
|
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk,
|
|
|
|
struct sk_buff *skb, struct net_device *in, struct net_device *out,
|
|
|
|
int (*okfn)(struct sock *, struct sk_buff *), bool cond)
|
2009-06-13 10:13:26 +08:00
|
|
|
{
|
2010-02-19 15:03:28 +08:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!cond ||
|
2015-04-06 10:19:04 +08:00
|
|
|
((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1))
|
|
|
|
ret = okfn(sk, skb);
|
2009-06-13 10:13:26 +08:00
|
|
|
return ret;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-06-13 10:13:26 +08:00
|
|
|
/* Run the (pf, hook) chain on @skb with no priority threshold and call
 * @okfn on acceptance.  See the comment block above for the okfn /
 * return-value contract. */
static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb,
	struct net_device *in, struct net_device *out,
	int (*okfn)(struct sock *, struct sk_buff *))
{
	return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN);
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Call setsockopt() */
|
2008-10-08 17:35:00 +08:00
|
|
|
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
|
2009-10-01 07:12:20 +08:00
|
|
|
unsigned int len);
|
2008-10-08 17:35:00 +08:00
|
|
|
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
|
2005-04-17 06:20:36 +08:00
|
|
|
int *len);
|
2010-02-02 22:03:24 +08:00
|
|
|
#ifdef CONFIG_COMPAT
|
2008-10-08 17:35:00 +08:00
|
|
|
int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
|
2009-10-01 07:12:20 +08:00
|
|
|
char __user *opt, unsigned int len);
|
2008-10-08 17:35:00 +08:00
|
|
|
int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
|
2006-03-21 14:45:21 +08:00
|
|
|
char __user *opt, int *len);
|
2010-02-02 22:03:24 +08:00
|
|
|
#endif
|
2006-03-21 14:45:21 +08:00
|
|
|
|
2005-08-10 10:37:23 +08:00
|
|
|
/* Call this before modifying an existing packet: ensures it is
|
|
|
|
modifiable and linear to the point you care about (writable_len).
|
|
|
|
Returns true or false. */
|
2013-09-27 05:48:15 +08:00
|
|
|
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
|
2005-08-10 10:37:23 +08:00
|
|
|
|
2007-12-05 17:22:05 +08:00
|
|
|
struct flowi;
|
2007-12-05 17:26:33 +08:00
|
|
|
struct nf_queue_entry;
|
2007-12-05 17:24:48 +08:00
|
|
|
|
2006-04-07 05:18:09 +08:00
|
|
|
/*
 * Per-address-family helper operations, registered via
 * nf_register_afinfo() and looked up through nf_get_afinfo().
 */
struct nf_afinfo {
	unsigned short family;	/* address family this entry serves */
	/* compute/verify the transport checksum of skb */
	__sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
			    unsigned int dataoff, u_int8_t protocol);
	/* as above, but over only the first len bytes */
	__sum16 (*checksum_partial)(struct sk_buff *skb,
				    unsigned int hook,
				    unsigned int dataoff,
				    unsigned int len,
				    u_int8_t protocol);
	/* family-specific route lookup into *dst */
	int (*route)(struct net *net, struct dst_entry **dst,
		     struct flowi *fl, bool strict);
	/* save routing state of skb into a queue entry */
	void (*saveroute)(const struct sk_buff *skb,
			  struct nf_queue_entry *entry);
	/* re-route skb from the saved state after requeueing */
	int (*reroute)(struct sk_buff *skb,
		       const struct nf_queue_entry *entry);
	int route_key_size;	/* bytes needed by saveroute/reroute state */
};
|
|
|
|
|
2010-11-16 01:17:21 +08:00
|
|
|
extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
|
2007-12-18 14:42:27 +08:00
|
|
|
/* Look up the per-family helper ops; may return NULL.  Uses
 * rcu_dereference(), so the caller must be in an RCU read-side
 * critical section. */
static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
{
	return rcu_dereference(nf_afinfo[family]);
}
|
2005-08-10 10:42:34 +08:00
|
|
|
|
2006-11-15 13:40:42 +08:00
|
|
|
/* Compute/verify skb's transport checksum via the family helper.
 * Returns 0 when no helper is registered for @family. */
static inline __sum16
nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
	    u_int8_t protocol, unsigned short family)
{
	const struct nf_afinfo *afinfo;
	__sum16 csum = 0;

	/* RCU protects the afinfo entry for the duration of the call. */
	rcu_read_lock();
	afinfo = nf_get_afinfo(family);
	if (afinfo)
		csum = afinfo->checksum(skb, hook, dataoff, protocol);
	rcu_read_unlock();
	return csum;
}
|
|
|
|
|
2008-03-20 22:15:53 +08:00
|
|
|
/* Like nf_checksum(), but only over the first @len bytes from @dataoff.
 * Returns 0 when no helper is registered for @family. */
static inline __sum16
nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
		    unsigned int dataoff, unsigned int len,
		    u_int8_t protocol, unsigned short family)
{
	const struct nf_afinfo *afinfo;
	__sum16 csum = 0;

	/* RCU protects the afinfo entry for the duration of the call. */
	rcu_read_lock();
	afinfo = nf_get_afinfo(family);
	if (afinfo)
		csum = afinfo->checksum_partial(skb, hook, dataoff, len,
						protocol);
	rcu_read_unlock();
	return csum;
}
|
|
|
|
|
2013-09-27 05:48:15 +08:00
|
|
|
int nf_register_afinfo(const struct nf_afinfo *afinfo);
|
|
|
|
void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
|
2006-04-07 05:18:09 +08:00
|
|
|
|
2006-01-07 15:06:30 +08:00
|
|
|
#include <net/flow.h>
|
2012-08-27 01:14:06 +08:00
|
|
|
extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
|
2006-01-07 15:06:30 +08:00
|
|
|
|
|
|
|
/* Give NAT a chance to rewrite the flow key @fl before a routing
 * decision.  No-op unless NAT support is compiled in and a decode
 * hook has been installed.  @family is currently unused here. */
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#ifdef CONFIG_NF_NAT_NEEDED
	void (*decodefn)(struct sk_buff *, struct flowi *);

	/* The hook pointer is published with RCU; pin it while calling. */
	rcu_read_lock();
	decodefn = rcu_dereference(nf_nat_decode_session_hook);
	if (decodefn)
		decodefn(skb, fl);
	rcu_read_unlock();
#endif
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#else /* !CONFIG_NETFILTER */
|
2015-04-06 10:19:04 +08:00
|
|
|
#define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb)
|
|
|
|
#define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb)
|
2008-10-08 17:35:00 +08:00
|
|
|
/* CONFIG_NETFILTER disabled: no hooks can exist, so hand the packet
 * straight to the continuation.  (Unlike the enabled variant, which
 * returns 1 and lets the caller run okfn, calling okfn here directly
 * matches the NF_HOOK macros above which expand to (okfn)(sk, skb).) */
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
				 struct sock *sk,
				 struct sk_buff *skb,
				 struct net_device *indev,
				 struct net_device *outdev,
				 int (*okfn)(struct sock *, struct sk_buff *),
				 int thresh)
{
	return okfn(sk, skb);
}
|
2015-04-06 10:19:04 +08:00
|
|
|
/* CONFIG_NETFILTER disabled: always report "accepted" (1); per the
 * nf_hook contract the caller then invokes okfn itself. */
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
			  struct sk_buff *skb, struct net_device *indev,
			  struct net_device *outdev,
			  int (*okfn)(struct sock *, struct sk_buff *))
{
	return 1;
}
|
|
|
|
struct flowi;
|
2006-01-07 15:06:30 +08:00
|
|
|
/* CONFIG_NETFILTER disabled: NAT session decoding is a no-op. */
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif /*CONFIG_NETFILTER*/
|
|
|
|
|
2007-03-24 02:17:07 +08:00
|
|
|
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
|
2015-09-03 07:26:07 +08:00
|
|
|
#include <linux/netfilter/nf_conntrack_zones_common.h>
|
|
|
|
|
2013-07-29 04:54:08 +08:00
|
|
|
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
|
2013-09-27 05:48:15 +08:00
|
|
|
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
|
2010-11-16 01:17:21 +08:00
|
|
|
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
|
2012-06-07 18:13:39 +08:00
|
|
|
|
|
|
|
struct nf_conn;
|
2013-08-27 14:50:12 +08:00
|
|
|
enum ip_conntrack_info;
|
2012-06-07 18:13:39 +08:00
|
|
|
struct nlattr;
|
|
|
|
|
|
|
|
/*
 * Callback table published (via the nfq_ct_hook RCU pointer below) so
 * the core can hand conntrack state to/from netlink attributes without
 * a hard link-time dependency on the conntrack code.
 */
struct nfq_ct_hook {
	/* bytes needed to serialize ct — presumably sized for build() */
	size_t (*build_size)(const struct nf_conn *ct);
	/* serialize ct into skb */
	int (*build)(struct sk_buff *skb, struct nf_conn *ct);
	/* apply state from a netlink attribute to ct */
	int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
	/* attach an expectation described by attr to ct */
	int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
			     u32 portid, u32 report);
	/* adjust sequence numbers by off for ct's tracked connection */
	void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo, s32 off);
};
|
2013-08-27 14:50:12 +08:00
|
|
|
extern struct nfq_ct_hook __rcu *nfq_ct_hook;
|
2007-03-24 02:17:07 +08:00
|
|
|
#else
|
|
|
|
/* Conntrack disabled: attaching conntrack info to a new skb is a no-op. */
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
|
|
|
|
#endif
|
|
|
|
|
2015-07-14 23:51:07 +08:00
|
|
|
/**
|
|
|
|
* nf_skb_duplicated - TEE target has sent a packet
|
|
|
|
*
|
|
|
|
* When a xtables target sends a packet, the OUTPUT and POSTROUTING
|
|
|
|
* hooks are traversed again, i.e. nft and xtables are invoked recursively.
|
|
|
|
*
|
|
|
|
* This is used by xtables TEE target to prevent the duplicated skb from
|
|
|
|
* being duplicated again.
|
|
|
|
*/
|
|
|
|
DECLARE_PER_CPU(bool, nf_skb_duplicated);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif /*__LINUX_NETFILTER_H*/
|