Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains Netfilter updates for your net-next
tree. This batch contains connection tracking updates for the cleanup
iteration path, patches from Florian Westphal:

1) Skip unconfirmed conntracks in nf_ct_iterate_cleanup_net(); just set
   the dying bit and let the owning CPU release them.

2) Add nf_ct_iterate_destroy(), to be used on module removal, to kill
   conntrack entries from all namespaces.

3) Restart iteration on hashtable resizing, since both may occur at the
   same time.

4) Use the new nf_ct_iterate_destroy() to remove conntracks with NAT
   mappings on module removal.

5) Use nf_ct_iterate_destroy() to remove conntrack entries on helper
   module removal, from Liping Zhang.

6) Use nf_ct_iterate_cleanup_net() to remove the timeout extension if
   the user requests this, also from Liping.

7) Add net_ns_barrier() and use it from the FTP helper, to make sure no
   concurrent namespace removal happens while the helper module is
   being removed.

8) Use NFPROTO_MAX in the layer 3 conntrack protocol array, to reduce
   module size. Same thing in nf_tables.

Updates for the nf_tables infrastructure:

9) Prepare usage of the extended ACK reporting infrastructure for
   nf_tables.

10) Remove unnecessary forward declaration in the nf_tables hash set.

11) Skip set size estimation if the number of elements is not
    specified.

12) Changes to accommodate a (faster) unresizable hash set
    implementation, for anonymous sets and dynamic size fixed sets
    with no timeouts.

13) Faster lookup function for unresizable hash tables with 2 and 4
    byte keys.

And finally, a bunch of assorted small updates and cleanups:

14) Do not hold a reference to the netdev from ipt_CLUSTERIP; instead,
    subscribe to device events and look up the index from the packet
    path. This fixes an issue that has been present since the very
    beginning. Patch from Xin Long.

15) Use nf_register_net_hook() in ipt_CLUSTERIP, from Florian Westphal.

16) Use ebt_invalid_target() whenever possible in the ebtables tree,
    from Gao Feng.

17) Calm down a compilation warning in the nf_dup infrastructure,
    patch from stephen hemminger.

18) Statify functions in the nftables rt expression, also from stephen.

19) Update the Makefile to use the canonical method of specifying
    nf_tables-objs. From Jike Song.

20) Use nf_conntrack_helpers_register() in amanda and H323.

21) Whitespace cleanup for ctnetlink, from linzhang.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 52a623bd61

@@ -1,7 +1,6 @@
 #ifndef _NFNETLINK_H
 #define _NFNETLINK_H
 
-
 #include <linux/netlink.h>
 #include <linux/capability.h>
 #include <net/netlink.h>
@@ -10,13 +9,16 @@
 struct nfnl_callback {
 	int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb,
 		    const struct nlmsghdr *nlh,
-		    const struct nlattr * const cda[]);
+		    const struct nlattr * const cda[],
+		    struct netlink_ext_ack *extack);
 	int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb,
 			const struct nlmsghdr *nlh,
-			const struct nlattr * const cda[]);
+			const struct nlattr * const cda[],
+			struct netlink_ext_ack *extack);
 	int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
 			  const struct nlmsghdr *nlh,
-			  const struct nlattr * const cda[]);
+			  const struct nlattr * const cda[],
+			  struct netlink_ext_ack *extack);
 	const struct nla_policy *policy;	/* netlink attribute policy */
 	const u_int16_t attr_count;		/* number of nlattr's */
 };
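
The extack pointer now threaded through every nfnl_callback lets a subsystem
return a human-readable error with the netlink ACK. A minimal sketch of a
callback using it; the function name and attribute index are illustrative,
only the signature and NL_SET_ERR_MSG() (from <linux/netlink.h>) come from
the kernel:

static int example_cmd(struct net *net, struct sock *nl, struct sk_buff *skb,
		       const struct nlmsghdr *nlh,
		       const struct nlattr * const cda[],
		       struct netlink_ext_ack *extack)
{
	/* attribute index 1 is hypothetical; report a precise parse error */
	if (!cda[1]) {
		NL_SET_ERR_MSG(extack, "missing mandatory attribute");
		return -EINVAL;
	}
	return 0;
}
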
@@ -122,8 +122,6 @@ extern unsigned int ebt_do_table(struct sk_buff *skb,
 #define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS))
 /* Clear the bit in the hook mask that tells if the rule is on a base chain */
 #define CLEAR_BASE_CHAIN_BIT (par->hook_mask &= ~(1 << NF_BR_NUMHOOKS))
-/* True if the target is not a standard target */
-#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
 
 static inline bool ebt_invalid_target(int target)
 {
@@ -158,6 +158,7 @@ extern struct net init_net;
 struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
 			struct net *old_net);
 
+void net_ns_barrier(void);
 #else /* CONFIG_NET_NS */
 #include <linux/sched.h>
 #include <linux/nsproxy.h>
@@ -168,6 +169,8 @@ static inline struct net *copy_net_ns(unsigned long flags,
 		return ERR_PTR(-EINVAL);
 	return old_net;
 }
+
+static inline void net_ns_barrier(void) {}
 #endif /* CONFIG_NET_NS */
@@ -225,9 +225,13 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
 			       u32 seq);
 
 /* Iterate over all conntracks: if iter returns true, it's deleted. */
-void nf_ct_iterate_cleanup(struct net *net,
-			   int (*iter)(struct nf_conn *i, void *data),
-			   void *data, u32 portid, int report);
+void nf_ct_iterate_cleanup_net(struct net *net,
+			       int (*iter)(struct nf_conn *i, void *data),
+			       void *data, u32 portid, int report);
+
+/* also set unconfirmed conntracks as dying. Only use in module exit path. */
+void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data),
+			   void *data);
 
 struct nf_conntrack_zone;
@@ -71,7 +71,7 @@ struct nf_conntrack_l3proto {
 	struct module *me;
 };
 
-extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
+extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO];
 
 #ifdef CONFIG_SYSCTL
 /* Protocol pernet registration. */
@@ -100,7 +100,7 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
 static inline struct nf_conntrack_l3proto *
 __nf_ct_l3proto_find(u_int16_t l3proto)
 {
-	if (unlikely(l3proto >= AF_MAX))
+	if (unlikely(l3proto >= NFPROTO_NUMPROTO))
 		return &nf_conntrack_l3proto_generic;
 	return rcu_dereference(nf_ct_l3protos[l3proto]);
 }
@@ -281,6 +281,23 @@ struct nft_set_estimate {
 	enum nft_set_class	space;
 };
 
+/**
+ *	struct nft_set_type - nf_tables set type
+ *
+ *	@select_ops: function to select nft_set_ops
+ *	@ops: default ops, used when no select_ops functions is present
+ *	@list: used internally
+ *	@owner: module reference
+ */
+struct nft_set_type {
+	const struct nft_set_ops	*(*select_ops)(const struct nft_ctx *,
+						       const struct nft_set_desc *desc,
+						       u32 flags);
+	const struct nft_set_ops	*ops;
+	struct list_head		list;
+	struct module			*owner;
+};
+
 struct nft_set_ext;
 struct nft_expr;
 
@@ -297,8 +314,6 @@ struct nft_expr;
 *	@privsize: function to return size of set private data
 *	@init: initialize private data of new set instance
 *	@destroy: destroy private data of set instance
- *	@list: nf_tables_set_ops list node
- *	@owner: module reference
 *	@elemsize: element private size
 *	@features: features supported by the implementation
 */
@@ -336,7 +351,8 @@ struct nft_set_ops {
 					       struct nft_set *set,
 					       struct nft_set_iter *iter);
 
-	unsigned int			(*privsize)(const struct nlattr * const nla[]);
+	unsigned int			(*privsize)(const struct nlattr * const nla[],
+						    const struct nft_set_desc *desc);
 	bool				(*estimate)(const struct nft_set_desc *desc,
 						    u32 features,
 						    struct nft_set_estimate *est);
@@ -345,14 +361,13 @@ struct nft_set_ops {
 					    const struct nlattr * const nla[]);
 	void				(*destroy)(const struct nft_set *set);
 
-	struct list_head		list;
-	struct module			*owner;
 	unsigned int			elemsize;
 	u32				features;
+	const struct nft_set_type	*type;
 };
 
-int nft_register_set(struct nft_set_ops *ops);
-void nft_unregister_set(struct nft_set_ops *ops);
+int nft_register_set(struct nft_set_type *type);
+void nft_unregister_set(struct nft_set_type *type);
 
 /**
 *	struct nft_set - nf_tables set instance
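
With the ops wrapped in struct nft_set_type, a set backend now registers the
type container instead of a bare nft_set_ops. A hedged sketch of a backend
with fixed ops and no select_ops; all example_* names are illustrative and
the ops callbacks are elided:

static const struct nft_set_ops example_set_ops = {
	/* .insert/.remove/.lookup/... callbacks would go here */
	.elemsize	= ALIGN(sizeof(struct nft_set_ext), 8), /* illustrative */
};

static struct nft_set_type example_set_type __read_mostly = {
	.ops	= &example_set_ops,	/* fixed ops, so no select_ops */
	.owner	= THIS_MODULE,
};

static int __init example_set_module_init(void)
{
	return nft_register_set(&example_set_type);
}

static void __exit example_set_module_exit(void)
{
	nft_unregister_set(&example_set_type);
}
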
@@ -61,7 +61,7 @@ static int ebt_dnat_tg_check(const struct xt_tgchk_param *par)
 	    (strcmp(par->table, "broute") != 0 ||
 	     hook_mask & ~(1 << NF_BR_BROUTING)))
 		return -EINVAL;
-	if (INVALID_TARGET)
+	if (ebt_invalid_target(info->target))
 		return -EINVAL;
 	return 0;
 }
@@ -44,7 +44,7 @@ static int ebt_mark_tg_check(const struct xt_tgchk_param *par)
 	tmp = info->target | ~EBT_VERDICT_BITS;
 	if (BASE_CHAIN && tmp == EBT_RETURN)
 		return -EINVAL;
-	if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0)
+	if (ebt_invalid_target(tmp))
 		return -EINVAL;
 	tmp = info->target & ~EBT_VERDICT_BITS;
 	if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE &&
@@ -47,7 +47,7 @@ static int ebt_redirect_tg_check(const struct xt_tgchk_param *par)
 	    (strcmp(par->table, "broute") != 0 ||
 	     hook_mask & ~(1 << NF_BR_BROUTING)))
 		return -EINVAL;
-	if (INVALID_TARGET)
+	if (ebt_invalid_target(info->target))
 		return -EINVAL;
 	return 0;
 }
@@ -51,7 +51,7 @@ static int ebt_snat_tg_check(const struct xt_tgchk_param *par)
 	if (BASE_CHAIN && tmp == EBT_RETURN)
 		return -EINVAL;
 
-	if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0)
+	if (ebt_invalid_target(tmp))
 		return -EINVAL;
 	tmp = info->target | EBT_VERDICT_BITS;
 	if ((tmp & ~NAT_ARP_BIT) != ~NAT_ARP_BIT)
@@ -501,6 +501,23 @@ static void cleanup_net(struct work_struct *work)
 		net_drop_ns(net);
 	}
 }
+
+/**
+ * net_ns_barrier - wait until concurrent net_cleanup_work is done
+ *
+ * cleanup_net runs from work queue and will first remove namespaces
+ * from the global list, then run net exit functions.
+ *
+ * Call this in module exit path to make sure that all netns
+ * ->exit ops have been invoked before the function is removed.
+ */
+void net_ns_barrier(void)
+{
+	mutex_lock(&net_mutex);
+	mutex_unlock(&net_mutex);
+}
+EXPORT_SYMBOL(net_ns_barrier);
+
 static DECLARE_WORK(net_cleanup_work, cleanup_net);
 
 void __put_net(struct net *net)
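
Since the barrier just takes and releases net_mutex, returning from
net_ns_barrier() guarantees that any cleanup_net() run that started earlier
has finished invoking ->exit ops. A sketch of the intended call pattern in a
module exit path (the example_* names are hypothetical):

static void __exit example_module_exit(void)
{
	/* clean up our per-netns state in all currently visible netns */
	example_cleanup_all_netns();

	/* wait for any in-flight netns cleanup worker; afterwards no
	 * namespace removed from the global list can still be running
	 * our ->exit callback
	 */
	net_ns_barrier();
}
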
@@ -47,7 +47,7 @@ struct clusterip_config {
 
 	__be32 clusterip;			/* the IP address */
 	u_int8_t clustermac[ETH_ALEN];		/* the MAC address */
-	struct net_device *dev;			/* device */
+	int ifindex;				/* device ifindex */
 	u_int16_t num_total_nodes;		/* total number of nodes */
 	unsigned long local_nodes;		/* node number array */
@@ -57,6 +57,9 @@ struct clusterip_config {
 	enum clusterip_hashmode hash_mode;	/* which hashing mode */
 	u_int32_t hash_initval;			/* hash initialization */
 	struct rcu_head rcu;
+
+	char ifname[IFNAMSIZ];			/* device ifname */
+	struct notifier_block notifier;		/* refresh c->ifindex in it */
 };
 
 #ifdef CONFIG_PROC_FS
@@ -98,9 +101,8 @@ clusterip_config_put(struct clusterip_config *c)
 * entry(rule) is removed, remove the config from lists, but don't free it
 * yet, since proc-files could still be holding references */
 static inline void
-clusterip_config_entry_put(struct clusterip_config *c)
+clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
 {
-	struct net *net = dev_net(c->dev);
 	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
 
 	local_bh_disable();
@@ -109,8 +111,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
 		spin_unlock(&cn->lock);
 		local_bh_enable();
 
-		dev_mc_del(c->dev, c->clustermac);
-		dev_put(c->dev);
+		unregister_netdevice_notifier(&c->notifier);
 
 		/* In case anyone still accesses the file, the open/close
 		 * functions are also incrementing the refcount on their own,
@@ -170,19 +171,55 @@ clusterip_config_init_nodelist(struct clusterip_config *c,
 		set_bit(i->local_nodes[n] - 1, &c->local_nodes);
 }
 
-static struct clusterip_config *
-clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
-		      struct net_device *dev)
+static int
+clusterip_netdev_event(struct notifier_block *this, unsigned long event,
+		       void *ptr)
 {
-	struct net *net = dev_net(dev);
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct clusterip_config *c;
 
+	c = container_of(this, struct clusterip_config, notifier);
+	switch (event) {
+	case NETDEV_REGISTER:
+		if (!strcmp(dev->name, c->ifname)) {
+			c->ifindex = dev->ifindex;
+			dev_mc_add(dev, c->clustermac);
+		}
+		break;
+	case NETDEV_UNREGISTER:
+		if (dev->ifindex == c->ifindex) {
+			dev_mc_del(dev, c->clustermac);
+			c->ifindex = -1;
+		}
+		break;
+	case NETDEV_CHANGENAME:
+		if (!strcmp(dev->name, c->ifname)) {
+			c->ifindex = dev->ifindex;
+			dev_mc_add(dev, c->clustermac);
+		} else if (dev->ifindex == c->ifindex) {
+			dev_mc_del(dev, c->clustermac);
+			c->ifindex = -1;
+		}
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct clusterip_config *
+clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
+		      __be32 ip, const char *iniface)
+{
+	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+	struct clusterip_config *c;
+	int err;
+
 	c = kzalloc(sizeof(*c), GFP_ATOMIC);
 	if (!c)
 		return ERR_PTR(-ENOMEM);
 
-	c->dev = dev;
+	strcpy(c->ifname, iniface);
+	c->ifindex = -1;
 	c->clusterip = ip;
 	memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
 	c->num_total_nodes = i->num_total_nodes;
@@ -213,17 +250,27 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
 					  cn->procdir,
 					  &clusterip_proc_fops, c);
 		if (!c->pde) {
-			spin_lock_bh(&cn->lock);
-			list_del_rcu(&c->list);
-			spin_unlock_bh(&cn->lock);
-			kfree(c);
-
-			return ERR_PTR(-ENOMEM);
+			err = -ENOMEM;
+			goto err;
 		}
 	}
 #endif
 
-	return c;
+	c->notifier.notifier_call = clusterip_netdev_event;
+	err = register_netdevice_notifier(&c->notifier);
+	if (!err)
+		return c;
+
+#ifdef CONFIG_PROC_FS
+	proc_remove(c->pde);
+err:
+#endif
+	spin_lock_bh(&cn->lock);
+	list_del_rcu(&c->list);
+	spin_unlock_bh(&cn->lock);
+	kfree(c);
+
+	return ERR_PTR(err);
 }
 
 #ifdef CONFIG_PROC_FS
@@ -425,14 +472,13 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
 				e->ip.iniface);
 			return -ENOENT;
 		}
+		dev_put(dev);
 
-		config = clusterip_config_init(cipinfo,
-					e->ip.dst.s_addr, dev);
-		if (IS_ERR(config)) {
-			dev_put(dev);
+		config = clusterip_config_init(par->net, cipinfo,
+					       e->ip.dst.s_addr,
+					       e->ip.iniface);
+		if (IS_ERR(config))
 			return PTR_ERR(config);
-		}
-		dev_mc_add(config->dev, config->clustermac);
 		}
 	}
 	cipinfo->config = config;
@@ -458,7 +504,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
 
 	/* if no more entries are referencing the config, remove it
 	 * from the list and destroy the proc entry */
-	clusterip_config_entry_put(cipinfo->config);
+	clusterip_config_entry_put(par->net, cipinfo->config);
 
 	clusterip_config_put(cipinfo->config);
@@ -558,10 +604,9 @@ arp_mangle(void *priv,
 	 * addresses on different interfacs. However, in the CLUSTERIP case
 	 * this wouldn't work, since we didn't subscribe the mcast group on
 	 * other interfaces */
-	if (c->dev != state->out) {
-		pr_debug("not mangling arp reply on different "
-			 "interface: cip'%s'-skb'%s'\n",
-			 c->dev->name, state->out->name);
+	if (c->ifindex != state->out->ifindex) {
+		pr_debug("not mangling arp reply on different interface: cip'%d'-skb'%d'\n",
+			 c->ifindex, state->out->ifindex);
 		clusterip_config_put(c);
 		return NF_ACCEPT;
 	}
@@ -743,14 +788,20 @@ static const struct file_operations clusterip_proc_fops = {
 static int clusterip_net_init(struct net *net)
 {
 	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+	int ret;
 
 	INIT_LIST_HEAD(&cn->configs);
 
 	spin_lock_init(&cn->lock);
 
+	ret = nf_register_net_hook(net, &cip_arp_ops);
+	if (ret < 0)
+		return ret;
+
 #ifdef CONFIG_PROC_FS
 	cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
 	if (!cn->procdir) {
+		nf_unregister_net_hook(net, &cip_arp_ops);
 		pr_err("Unable to proc dir entry\n");
 		return -ENOMEM;
 	}
@@ -765,6 +816,7 @@ static void clusterip_net_exit(struct net *net)
 	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
 	proc_remove(cn->procdir);
 #endif
+	nf_unregister_net_hook(net, &cip_arp_ops);
 }
 
 static struct pernet_operations clusterip_net_ops = {
@@ -786,17 +838,11 @@ static int __init clusterip_tg_init(void)
 	if (ret < 0)
 		goto cleanup_subsys;
 
-	ret = nf_register_hook(&cip_arp_ops);
-	if (ret < 0)
-		goto cleanup_target;
-
 	pr_info("ClusterIP Version %s loaded successfully\n",
 		CLUSTERIP_VERSION);
 
 	return 0;
 
-cleanup_target:
-	xt_unregister_target(&clusterip_tg_reg);
 cleanup_subsys:
 	unregister_pernet_subsys(&clusterip_net_ops);
 	return ret;
@@ -806,7 +852,6 @@ static void __exit clusterip_tg_exit(void)
 {
 	pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION);
 
-	nf_unregister_hook(&cip_arp_ops);
 	xt_unregister_target(&clusterip_tg_reg);
 	unregister_pernet_subsys(&clusterip_net_ops);
@@ -98,8 +98,8 @@ static int masq_device_event(struct notifier_block *this,
 	 */
 	NF_CT_ASSERT(dev->ifindex != 0);
 
-	nf_ct_iterate_cleanup(net, device_cmp,
-			      (void *)(long)dev->ifindex, 0, 0);
+	nf_ct_iterate_cleanup_net(net, device_cmp,
+				  (void *)(long)dev->ifindex, 0, 0);
 	}
 
 	return NOTIFY_DONE;
@@ -75,8 +75,8 @@ static int masq_device_event(struct notifier_block *this,
 	struct net *net = dev_net(dev);
 
 	if (event == NETDEV_DOWN)
-		nf_ct_iterate_cleanup(net, device_cmp,
-				      (void *)(long)dev->ifindex, 0, 0);
+		nf_ct_iterate_cleanup_net(net, device_cmp,
+					  (void *)(long)dev->ifindex, 0, 0);
 
 	return NOTIFY_DONE;
 }
@@ -99,7 +99,7 @@ static void iterate_cleanup_work(struct work_struct *work)
 	w = container_of(work, struct masq_dev_work, work);
 
 	index = w->ifindex;
-	nf_ct_iterate_cleanup(w->net, device_cmp, (void *)index, 0, 0);
+	nf_ct_iterate_cleanup_net(w->net, device_cmp, (void *)index, 0, 0);
 
 	put_net(w->net);
 	kfree(w);
@@ -110,12 +110,12 @@ static void iterate_cleanup_work(struct work_struct *work)
 /* ipv6 inet notifier is an atomic notifier, i.e. we cannot
 * schedule.
 *
- * Unfortunately, nf_ct_iterate_cleanup can run for a long
+ * Unfortunately, nf_ct_iterate_cleanup_net can run for a long
 * time if there are lots of conntracks and the system
 * handles high softirq load, so it frequently calls cond_resched
 * while iterating the conntrack table.
 *
- * So we defer nf_ct_iterate_cleanup walk to the system workqueue.
+ * So we defer nf_ct_iterate_cleanup_net walk to the system workqueue.
 *
 * As we can have 'a lot' of inet_events (depending on amount
 * of ipv6 addresses being deleted), we also need to add an upper
@@ -70,10 +70,9 @@ obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o
 obj-$(CONFIG_NF_DUP_NETDEV)	+= nf_dup_netdev.o
 
 # nf_tables
-nf_tables-objs += nf_tables_core.o nf_tables_api.o nf_tables_trace.o
-nf_tables-objs += nft_immediate.o nft_cmp.o nft_range.o
-nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o
-nf_tables-objs += nft_lookup.o nft_dynset.o
+nf_tables-objs := nf_tables_core.o nf_tables_api.o nf_tables_trace.o \
+		  nft_immediate.o nft_cmp.o nft_range.o nft_bitwise.o \
+		  nft_byteorder.o nft_payload.o nft_lookup.o nft_dynset.o
 
 obj-$(CONFIG_NF_TABLES)		+= nf_tables.o
 obj-$(CONFIG_NF_TABLES_INET)	+= nf_tables_inet.o
@@ -841,14 +841,16 @@ find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index,
 
 static int ip_set_none(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 		       const struct nlmsghdr *nlh,
-		       const struct nlattr * const attr[])
+		       const struct nlattr * const attr[],
+		       struct netlink_ext_ack *extack)
 {
 	return -EOPNOTSUPP;
 }
 
 static int ip_set_create(struct net *net, struct sock *ctnl,
 			 struct sk_buff *skb, const struct nlmsghdr *nlh,
-			 const struct nlattr * const attr[])
+			 const struct nlattr * const attr[],
+			 struct netlink_ext_ack *extack)
 {
 	struct ip_set_net *inst = ip_set_pernet(net);
 	struct ip_set *set, *clash = NULL;
@@ -989,7 +991,8 @@ ip_set_destroy_set(struct ip_set *set)
 
 static int ip_set_destroy(struct net *net, struct sock *ctnl,
 			  struct sk_buff *skb, const struct nlmsghdr *nlh,
-			  const struct nlattr * const attr[])
+			  const struct nlattr * const attr[],
+			  struct netlink_ext_ack *extack)
 {
 	struct ip_set_net *inst = ip_set_pernet(net);
 	struct ip_set *s;
@@ -1067,7 +1070,8 @@ ip_set_flush_set(struct ip_set *set)
 
 static int ip_set_flush(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 			const struct nlmsghdr *nlh,
-			const struct nlattr * const attr[])
+			const struct nlattr * const attr[],
+			struct netlink_ext_ack *extack)
 {
 	struct ip_set_net *inst = ip_set_pernet(net);
 	struct ip_set *s;
@@ -1106,7 +1110,8 @@ ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = {
 
 static int ip_set_rename(struct net *net, struct sock *ctnl,
 			 struct sk_buff *skb, const struct nlmsghdr *nlh,
-			 const struct nlattr * const attr[])
+			 const struct nlattr * const attr[],
+			 struct netlink_ext_ack *extack)
 {
 	struct ip_set_net *inst = ip_set_pernet(net);
 	struct ip_set *set, *s;
@@ -1155,7 +1160,8 @@ out:
 
 static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 		       const struct nlmsghdr *nlh,
-		       const struct nlattr * const attr[])
+		       const struct nlattr * const attr[],
+		       struct netlink_ext_ack *extack)
 {
 	struct ip_set_net *inst = ip_set_pernet(net);
 	struct ip_set *from, *to;
@@ -1428,7 +1434,8 @@ out:
 
 static int ip_set_dump(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 		       const struct nlmsghdr *nlh,
-		       const struct nlattr * const attr[])
+		       const struct nlattr * const attr[],
+		       struct netlink_ext_ack *extack)
 {
 	if (unlikely(protocol_failed(attr)))
 		return -IPSET_ERR_PROTOCOL;
@@ -1513,7 +1520,8 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
 
 static int ip_set_uadd(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 		       const struct nlmsghdr *nlh,
-		       const struct nlattr * const attr[])
+		       const struct nlattr * const attr[],
+		       struct netlink_ext_ack *extack)
 {
 	struct ip_set_net *inst = ip_set_pernet(net);
 	struct ip_set *set;
@@ -1567,7 +1575,8 @@ static int ip_set_uadd(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 
 static int ip_set_udel(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 		       const struct nlmsghdr *nlh,
-		       const struct nlattr * const attr[])
+		       const struct nlattr * const attr[],
+		       struct netlink_ext_ack *extack)
 {
 	struct ip_set_net *inst = ip_set_pernet(net);
 	struct ip_set *set;
@@ -1621,7 +1630,8 @@ static int ip_set_udel(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 
 static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 			const struct nlmsghdr *nlh,
-			const struct nlattr * const attr[])
+			const struct nlattr * const attr[],
+			struct netlink_ext_ack *extack)
 {
 	struct ip_set_net *inst = ip_set_pernet(net);
 	struct ip_set *set;
@@ -1656,7 +1666,8 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 
 static int ip_set_header(struct net *net, struct sock *ctnl,
 			 struct sk_buff *skb, const struct nlmsghdr *nlh,
-			 const struct nlattr * const attr[])
+			 const struct nlattr * const attr[],
+			 struct netlink_ext_ack *extack)
 {
 	struct ip_set_net *inst = ip_set_pernet(net);
 	const struct ip_set *set;
@@ -1712,7 +1723,8 @@ static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = {
 
 static int ip_set_type(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 		       const struct nlmsghdr *nlh,
-		       const struct nlattr * const attr[])
+		       const struct nlattr * const attr[],
+		       struct netlink_ext_ack *extack)
 {
 	struct sk_buff *skb2;
 	struct nlmsghdr *nlh2;
@@ -1770,7 +1782,8 @@ ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = {
 
 static int ip_set_protocol(struct net *net, struct sock *ctnl,
 			   struct sk_buff *skb, const struct nlmsghdr *nlh,
-			   const struct nlattr * const attr[])
+			   const struct nlattr * const attr[],
+			   struct netlink_ext_ack *extack)
 {
 	struct sk_buff *skb2;
 	struct nlmsghdr *nlh2;
@@ -197,8 +197,8 @@ static void __exit nf_conntrack_amanda_fini(void)
 {
 	int i;
 
-	nf_conntrack_helper_unregister(&amanda_helper[0]);
-	nf_conntrack_helper_unregister(&amanda_helper[1]);
+	nf_conntrack_helpers_unregister(amanda_helper,
+					ARRAY_SIZE(amanda_helper));
 	for (i = 0; i < ARRAY_SIZE(search); i++)
 		textsearch_destroy(search[i].ts);
 }
@@ -218,16 +218,12 @@ static int __init nf_conntrack_amanda_init(void)
 			goto err1;
 		}
 	}
-	ret = nf_conntrack_helper_register(&amanda_helper[0]);
+	ret = nf_conntrack_helpers_register(amanda_helper,
+					    ARRAY_SIZE(amanda_helper));
 	if (ret < 0)
 		goto err1;
-	ret = nf_conntrack_helper_register(&amanda_helper[1]);
-	if (ret < 0)
-		goto err2;
 	return 0;
 
-err2:
-	nf_conntrack_helper_unregister(&amanda_helper[0]);
 err1:
 	while (--i >= 0)
 		textsearch_destroy(search[i].ts);
@@ -1586,13 +1586,12 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
 
 /* Bring out ya dead! */
 static struct nf_conn *
-get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
+get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
 		void *data, unsigned int *bucket)
 {
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
 	struct hlist_nulls_node *n;
-	int cpu;
 	spinlock_t *lockp;
 
 	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
@@ -1604,8 +1603,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (net_eq(nf_ct_net(ct), net) &&
-			    iter(ct, data))
+			if (iter(ct, data))
 				goto found;
 		}
 	}
@@ -1614,18 +1612,6 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
 		cond_resched();
 	}
 
-	for_each_possible_cpu(cpu) {
-		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
-
-		spin_lock_bh(&pcpu->lock);
-		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
-			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (iter(ct, data))
-				set_bit(IPS_DYING_BIT, &ct->status);
-		}
-		spin_unlock_bh(&pcpu->lock);
-		cond_resched();
-	}
 	return NULL;
 found:
 	atomic_inc(&ct->ct_general.use);
@@ -1634,31 +1620,142 @@ found:
 	return ct;
 }
 
-void nf_ct_iterate_cleanup(struct net *net,
-			   int (*iter)(struct nf_conn *i, void *data),
-			   void *data, u32 portid, int report)
+static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
+				  void *data, u32 portid, int report)
 {
+	unsigned int bucket = 0, sequence;
 	struct nf_conn *ct;
-	unsigned int bucket = 0;
 
 	might_sleep();
 
-	if (atomic_read(&net->ct.count) == 0)
-		return;
-
-	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
-		/* Time to push up daises... */
+	for (;;) {
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
+
+		while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
+			/* Time to push up daises... */
+
+			nf_ct_delete(ct, portid, report);
+			nf_ct_put(ct);
+			cond_resched();
+		}
+
+		if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
+			break;
+		bucket = 0;
+	}
+}
+
+struct iter_data {
+	int (*iter)(struct nf_conn *i, void *data);
+	void *data;
+	struct net *net;
+};
+
+static int iter_net_only(struct nf_conn *i, void *data)
+{
+	struct iter_data *d = data;
+
+	if (!net_eq(d->net, nf_ct_net(i)))
+		return 0;
+
+	return d->iter(i, d->data);
+}
+
+static void
+__nf_ct_unconfirmed_destroy(struct net *net)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct nf_conntrack_tuple_hash *h;
+		struct hlist_nulls_node *n;
+		struct ct_pcpu *pcpu;
+
+		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+		spin_lock_bh(&pcpu->lock);
+		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
+			struct nf_conn *ct;
+
+			ct = nf_ct_tuplehash_to_ctrack(h);
+
+			/* we cannot call iter() on unconfirmed list, the
+			 * owning cpu can reallocate ct->ext at any time.
+			 */
+			set_bit(IPS_DYING_BIT, &ct->status);
+		}
+		spin_unlock_bh(&pcpu->lock);
+		cond_resched();
+	}
+}
+
+void nf_ct_iterate_cleanup_net(struct net *net,
+			       int (*iter)(struct nf_conn *i, void *data),
+			       void *data, u32 portid, int report)
+{
+	struct iter_data d;
+
+	might_sleep();
+
+	if (atomic_read(&net->ct.count) == 0)
+		return;
+
+	__nf_ct_unconfirmed_destroy(net);
 
-		nf_ct_delete(ct, portid, report);
-		nf_ct_put(ct);
-		cond_resched();
-	}
+	d.iter = iter;
+	d.data = data;
+	d.net = net;
+
+	synchronize_net();
+
+	nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
 }
-EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
+EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
+
+/**
+ * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
+ * @iter: callback to invoke for each conntrack
+ * @data: data to pass to @iter
+ *
+ * Like nf_ct_iterate_cleanup, but first marks conntracks on the
+ * unconfirmed list as dying (so they will not be inserted into
+ * main table).
+ *
+ * Can only be called in module exit path.
+ */
+void
+nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
+{
+	struct net *net;
+
+	rtnl_lock();
+	for_each_net(net) {
+		if (atomic_read(&net->ct.count) == 0)
+			continue;
+		__nf_ct_unconfirmed_destroy(net);
+	}
+	rtnl_unlock();
+
+	/* Need to wait for netns cleanup worker to finish, if its
+	 * running -- it might have deleted a net namespace from
+	 * the global list, so our __nf_ct_unconfirmed_destroy() might
+	 * not have affected all namespaces.
+	 */
+	net_ns_barrier();
+
+	/* a conntrack could have been unlinked from unconfirmed list
+	 * before we grabbed pcpu lock in __nf_ct_unconfirmed_destroy().
+	 * This makes sure its inserted into conntrack table.
+	 */
+	synchronize_net();
+
+	nf_ct_iterate_cleanup(iter, data, 0, 0);
+}
+EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
 
 static int kill_all(struct nf_conn *i, void *data)
 {
-	return 1;
+	return net_eq(nf_ct_net(i), data);
 }
 
 void nf_ct_free_hashtable(void *hash, unsigned int size)
@@ -1723,7 +1820,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
 i_see_dead_people:
 	busy = 0;
 	list_for_each_entry(net, net_exit_list, exit_list) {
-		nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
+		nf_ct_iterate_cleanup(kill_all, net, 0, 0);
 		if (atomic_read(&net->ct.count) != 0)
 			busy = 1;
 	}
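
Module-removal paths can now use a single call that marks unconfirmed
entries dying in every namespace, synchronizes with netns teardown, and then
sweeps the hash table. A sketch of a typical caller (the example_* names are
hypothetical; the callback contract, where a true return value means the
entry is deleted, matches the comment in nf_conntrack.h above):

/* delete every conntrack entry, regardless of namespace */
static int example_kill_all(struct nf_conn *ct, void *data)
{
	return 1;
}

static void __exit example_module_exit(void)
{
	nf_ct_iterate_destroy(example_kill_all, NULL);
}
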
@@ -1815,14 +1815,44 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
 	},
 };
 
+static int __init h323_helper_init(void)
+{
+	int ret;
+
+	ret = nf_conntrack_helper_register(&nf_conntrack_helper_h245);
+	if (ret < 0)
+		return ret;
+	ret = nf_conntrack_helpers_register(nf_conntrack_helper_q931,
+					ARRAY_SIZE(nf_conntrack_helper_q931));
+	if (ret < 0)
+		goto err1;
+	ret = nf_conntrack_helpers_register(nf_conntrack_helper_ras,
+					ARRAY_SIZE(nf_conntrack_helper_ras));
+	if (ret < 0)
+		goto err2;
+
+	return 0;
+err2:
+	nf_conntrack_helpers_unregister(nf_conntrack_helper_q931,
+					ARRAY_SIZE(nf_conntrack_helper_q931));
+err1:
+	nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
+	return ret;
+}
+
+static void __exit h323_helper_exit(void)
+{
+	nf_conntrack_helpers_unregister(nf_conntrack_helper_ras,
+					ARRAY_SIZE(nf_conntrack_helper_ras));
+	nf_conntrack_helpers_unregister(nf_conntrack_helper_q931,
+					ARRAY_SIZE(nf_conntrack_helper_q931));
+	nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
+}
+
 /****************************************************************************/
 static void __exit nf_conntrack_h323_fini(void)
 {
-	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[1]);
-	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]);
-	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]);
-	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]);
-	nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
+	h323_helper_exit();
 	kfree(h323_buffer);
 	pr_debug("nf_ct_h323: fini\n");
 }
@@ -1837,32 +1867,11 @@ static int __init nf_conntrack_h323_init(void)
 	h323_buffer = kmalloc(65536, GFP_KERNEL);
 	if (!h323_buffer)
 		return -ENOMEM;
-	ret = nf_conntrack_helper_register(&nf_conntrack_helper_h245);
+	ret = h323_helper_init();
 	if (ret < 0)
 		goto err1;
-	ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[0]);
-	if (ret < 0)
-		goto err2;
-	ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[1]);
-	if (ret < 0)
-		goto err3;
-	ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[0]);
-	if (ret < 0)
-		goto err4;
-	ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[1]);
-	if (ret < 0)
-		goto err5;
 	pr_debug("nf_ct_h323: init success\n");
 	return 0;
 
-err5:
-	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]);
-err4:
-	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]);
-err3:
-	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]);
-err2:
-	nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
 err1:
 	kfree(h323_buffer);
 	return ret;
@@ -285,16 +285,16 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
 EXPORT_SYMBOL_GPL(__nf_ct_try_assign_helper);
 
 /* appropriate ct lock protecting must be taken by caller */
-static inline int unhelp(struct nf_conntrack_tuple_hash *i,
-			 const struct nf_conntrack_helper *me)
+static int unhelp(struct nf_conn *ct, void *me)
 {
-	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
 	struct nf_conn_help *help = nfct_help(ct);
 
 	if (help && rcu_dereference_raw(help->helper) == me) {
 		nf_conntrack_event(IPCT_HELPER, ct);
 		RCU_INIT_POINTER(help->helper, NULL);
 	}
+
+	/* We are not intended to delete this conntrack. */
 	return 0;
 }
@@ -437,33 +437,10 @@ out:
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_register);
 
-static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
-					     struct net *net)
-{
-	struct nf_conntrack_tuple_hash *h;
-	const struct hlist_nulls_node *nn;
-	int cpu;
-
-	/* Get rid of expecteds, set helpers to NULL. */
-	for_each_possible_cpu(cpu) {
-		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
-
-		spin_lock_bh(&pcpu->lock);
-		hlist_nulls_for_each_entry(h, nn, &pcpu->unconfirmed, hnnode)
-			unhelp(h, me);
-		spin_unlock_bh(&pcpu->lock);
-	}
-}
-
 void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 {
-	struct nf_conntrack_tuple_hash *h;
 	struct nf_conntrack_expect *exp;
 	const struct hlist_node *next;
-	const struct hlist_nulls_node *nn;
-	unsigned int last_hsize;
-	spinlock_t *lock;
-	struct net *net;
 	unsigned int i;
 
 	mutex_lock(&nf_ct_helper_mutex);
@@ -491,26 +468,7 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 	}
 	spin_unlock_bh(&nf_conntrack_expect_lock);
 
-	rtnl_lock();
-	for_each_net(net)
-		__nf_conntrack_helper_unregister(me, net);
-	rtnl_unlock();
-
-	local_bh_disable();
-restart:
-	last_hsize = nf_conntrack_htable_size;
-	for (i = 0; i < last_hsize; i++) {
-		lock = &nf_conntrack_locks[i % CONNTRACK_LOCKS];
-		nf_conntrack_lock(lock);
-		if (last_hsize != nf_conntrack_htable_size) {
-			spin_unlock(lock);
-			goto restart;
-		}
-		hlist_nulls_for_each_entry(h, nn, &nf_conntrack_hash[i], hnnode)
-			unhelp(h, me);
-		spin_unlock(lock);
-	}
-	local_bh_enable();
+	nf_ct_iterate_destroy(unhelp, me);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
@@ -636,11 +636,11 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
 	if (events & (1 << IPCT_DESTROY)) {
 		type = IPCTNL_MSG_CT_DELETE;
 		group = NFNLGRP_CONNTRACK_DESTROY;
-	} else  if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
+	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
 		type = IPCTNL_MSG_CT_NEW;
 		flags = NLM_F_CREATE|NLM_F_EXCL;
 		group = NFNLGRP_CONNTRACK_NEW;
-	} else  if (events) {
+	} else if (events) {
 		type = IPCTNL_MSG_CT_NEW;
 		group = NFNLGRP_CONNTRACK_UPDATE;
 	} else
@@ -1122,8 +1122,8 @@ static int ctnetlink_flush_conntrack(struct net *net,
 			return PTR_ERR(filter);
 	}
 
-	nf_ct_iterate_cleanup(net, ctnetlink_filter_match, filter,
-			      portid, report);
+	nf_ct_iterate_cleanup_net(net, ctnetlink_filter_match, filter,
+				  portid, report);
 	kfree(filter);
 
 	return 0;
@@ -1132,7 +1132,8 @@ static int ctnetlink_flush_conntrack(struct net *net,
 static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
 				   struct sk_buff *skb,
 				   const struct nlmsghdr *nlh,
-				   const struct nlattr * const cda[])
+				   const struct nlattr * const cda[],
+				   struct netlink_ext_ack *extack)
 {
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conntrack_tuple tuple;
@@ -1184,7 +1185,8 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
 static int ctnetlink_get_conntrack(struct net *net, struct sock *ctnl,
 				   struct sk_buff *skb,
 				   const struct nlmsghdr *nlh,
-				   const struct nlattr * const cda[])
+				   const struct nlattr * const cda[],
+				   struct netlink_ext_ack *extack)
 {
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conntrack_tuple tuple;
@@ -1345,7 +1347,8 @@ ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
 static int ctnetlink_get_ct_dying(struct net *net, struct sock *ctnl,
 				  struct sk_buff *skb,
 				  const struct nlmsghdr *nlh,
-				  const struct nlattr * const cda[])
+				  const struct nlattr * const cda[],
+				  struct netlink_ext_ack *extack)
 {
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		struct netlink_dump_control c = {
@@ -1367,7 +1370,8 @@ ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
 static int ctnetlink_get_ct_unconfirmed(struct net *net, struct sock *ctnl,
 					struct sk_buff *skb,
 					const struct nlmsghdr *nlh,
-					const struct nlattr * const cda[])
+					const struct nlattr * const cda[],
+					struct netlink_ext_ack *extack)
 {
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		struct netlink_dump_control c = {
@@ -1906,7 +1910,8 @@ err1:
 static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
 				   struct sk_buff *skb,
 				   const struct nlmsghdr *nlh,
-				   const struct nlattr * const cda[])
+				   const struct nlattr * const cda[],
+				   struct netlink_ext_ack *extack)
 {
 	struct nf_conntrack_tuple otuple, rtuple;
 	struct nf_conntrack_tuple_hash *h = NULL;
@@ -2071,7 +2076,8 @@ ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
 static int ctnetlink_stat_ct_cpu(struct net *net, struct sock *ctnl,
 				 struct sk_buff *skb,
 				 const struct nlmsghdr *nlh,
-				 const struct nlattr * const cda[])
+				 const struct nlattr * const cda[],
+				 struct netlink_ext_ack *extack)
 {
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		struct netlink_dump_control c = {
@@ -2116,7 +2122,8 @@ nlmsg_failure:
 
 static int ctnetlink_stat_ct(struct net *net, struct sock *ctnl,
 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
-			     const struct nlattr * const cda[])
+			     const struct nlattr * const cda[],
+			     struct netlink_ext_ack *extack)
 {
 	struct sk_buff *skb2;
 	int err;
@@ -2778,7 +2785,8 @@ out:
 static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
 				 struct sk_buff *skb,
 				 const struct nlmsghdr *nlh,
-				 const struct nlattr * const cda[])
+				 const struct nlattr * const cda[],
+				 struct netlink_ext_ack *extack)
 {
 	int err;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
@@ -2822,7 +2830,8 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
 
 static int ctnetlink_get_expect(struct net *net, struct sock *ctnl,
 				struct sk_buff *skb, const struct nlmsghdr *nlh,
-				const struct nlattr * const cda[])
+				const struct nlattr * const cda[],
+				struct netlink_ext_ack *extack)
 {
 	struct nf_conntrack_tuple tuple;
 	struct nf_conntrack_expect *exp;
@@ -2834,7 +2843,8 @@ static int ctnetlink_get_expect(struct net *net, struct sock *ctnl,
 
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		if (cda[CTA_EXPECT_MASTER])
-			return ctnetlink_dump_exp_ct(net, ctnl, skb, nlh, cda);
+			return ctnetlink_dump_exp_ct(net, ctnl, skb, nlh, cda,
+						     extack);
 		else {
 			struct netlink_dump_control c = {
 				.dump = ctnetlink_exp_dump_table,
@@ -2902,7 +2912,8 @@ out:
 
 static int ctnetlink_del_expect(struct net *net, struct sock *ctnl,
 				struct sk_buff *skb, const struct nlmsghdr *nlh,
-				const struct nlattr * const cda[])
+				const struct nlattr * const cda[],
+				struct netlink_ext_ack *extack)
 {
 	struct nf_conntrack_expect *exp;
 	struct nf_conntrack_tuple tuple;
@@ -3190,7 +3201,8 @@ err_ct:
 
 static int ctnetlink_new_expect(struct net *net, struct sock *ctnl,
 				struct sk_buff *skb, const struct nlmsghdr *nlh,
-				const struct nlattr * const cda[])
+				const struct nlattr * const cda[],
+				struct netlink_ext_ack *extack)
 {
 	struct nf_conntrack_tuple tuple;
 	struct nf_conntrack_expect *exp;
@@ -3296,7 +3308,8 @@ ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
 static int ctnetlink_stat_exp_cpu(struct net *net, struct sock *ctnl,
 				  struct sk_buff *skb,
 				  const struct nlmsghdr *nlh,
-				  const struct nlattr * const cda[])
+				  const struct nlattr * const cda[],
+				  struct netlink_ext_ack *extack)
 {
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		struct netlink_dump_control c = {
@@ -28,8 +28,8 @@
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_core.h>
 
-static struct nf_conntrack_l4proto __rcu **nf_ct_protos[PF_MAX] __read_mostly;
-struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX] __read_mostly;
+static struct nf_conntrack_l4proto __rcu **nf_ct_protos[NFPROTO_NUMPROTO] __read_mostly;
+struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO] __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ct_l3protos);
 
 static DEFINE_MUTEX(nf_ct_proto_mutex);
@@ -68,7 +68,7 @@ nf_ct_unregister_sysctl(struct ctl_table_header **header,
 struct nf_conntrack_l4proto *
 __nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto)
 {
-	if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
+	if (unlikely(l3proto >= NFPROTO_NUMPROTO || nf_ct_protos[l3proto] == NULL))
 		return &nf_conntrack_l4proto_generic;
 
 	return rcu_dereference(nf_ct_protos[l3proto][l4proto]);
@@ -212,7 +212,7 @@ int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto)
 	int ret = 0;
 	struct nf_conntrack_l3proto *old;
 
-	if (proto->l3proto >= AF_MAX)
+	if (proto->l3proto >= NFPROTO_NUMPROTO)
 		return -EBUSY;
 
 	if (proto->tuple_to_nlattr && !proto->nlattr_tuple_size)
@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_register);
 
 void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto)
 {
-	BUG_ON(proto->l3proto >= AF_MAX);
+	BUG_ON(proto->l3proto >= NFPROTO_NUMPROTO);
 
 	mutex_lock(&nf_ct_proto_mutex);
 	BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
@@ -265,6 +265,8 @@ void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto)
 	mutex_unlock(&nf_ct_proto_mutex);
 
 	synchronize_rcu();
+	/* Remove all contrack entries for this protocol */
+	nf_ct_iterate_destroy(kill_l3proto, proto);
 }
 EXPORT_SYMBOL_GPL(nf_ct_l3proto_unregister);
@@ -280,9 +282,6 @@ void nf_ct_l3proto_pernet_unregister(struct net *net,
 	 */
 	if (proto->net_ns_put)
 		proto->net_ns_put(net);
-
-	/* Remove all contrack entries for this protocol */
-	nf_ct_iterate_cleanup(net, kill_l3proto, proto, 0, 0);
 }
 EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister);
@@ -342,7 +341,7 @@ int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *l4proto)
 {
 	int ret = 0;
 
-	if (l4proto->l3proto >= PF_MAX)
+	if (l4proto->l3proto >= ARRAY_SIZE(nf_ct_protos))
 		return -EBUSY;
 
 	if ((l4proto->to_nlattr && !l4proto->nlattr_size) ||
@@ -421,17 +420,23 @@ out:
 }
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register_one);
 
-void nf_ct_l4proto_unregister_one(struct nf_conntrack_l4proto *l4proto)
+static void __nf_ct_l4proto_unregister_one(struct nf_conntrack_l4proto *l4proto)
+
 {
-	BUG_ON(l4proto->l3proto >= PF_MAX);
+	BUG_ON(l4proto->l3proto >= ARRAY_SIZE(nf_ct_protos));
 
-	mutex_lock(&nf_ct_proto_mutex);
 	BUG_ON(rcu_dereference_protected(
 			nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
 			lockdep_is_held(&nf_ct_proto_mutex)
 			) != l4proto);
 	rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
 			   &nf_conntrack_l4proto_generic);
+}
+
+void nf_ct_l4proto_unregister_one(struct nf_conntrack_l4proto *l4proto)
+{
+	mutex_lock(&nf_ct_proto_mutex);
+	__nf_ct_l4proto_unregister_one(l4proto);
 	mutex_unlock(&nf_ct_proto_mutex);
 
 	synchronize_rcu();
@@ -448,9 +453,6 @@ void nf_ct_l4proto_pernet_unregister_one(struct net *net,
 
 	pn->users--;
 	nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
-
-	/* Remove all contrack entries for this protocol */
-	nf_ct_iterate_cleanup(net, kill_l4proto, l4proto, 0, 0);
 }
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one);
@@ -500,8 +502,14 @@ EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register);
 void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *l4proto[],
 			      unsigned int num_proto)
 {
+	mutex_lock(&nf_ct_proto_mutex);
 	while (num_proto-- != 0)
-		nf_ct_l4proto_unregister_one(l4proto[num_proto]);
+		__nf_ct_l4proto_unregister_one(l4proto[num_proto]);
+	mutex_unlock(&nf_ct_proto_mutex);
+
+	synchronize_net();
+	/* Remove all contrack entries for this protocol */
+	nf_ct_iterate_destroy(kill_l4proto, l4proto);
 }
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister);
@@ -548,7 +556,7 @@ void nf_conntrack_proto_pernet_fini(struct net *net)
 int nf_conntrack_proto_init(void)
 {
 	unsigned int i;
-	for (i = 0; i < AF_MAX; i++)
+	for (i = 0; i < NFPROTO_NUMPROTO; i++)
 		rcu_assign_pointer(nf_ct_l3protos[i],
 				   &nf_conntrack_l3proto_generic);
 	return 0;
@@ -558,6 +566,6 @@ void nf_conntrack_proto_fini(void)
 {
 	unsigned int i;
 	/* free l3proto protocol tables */
-	for (i = 0; i < PF_MAX; i++)
+	for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++)
 		kfree(nf_ct_protos[i]);
 }
@@ -13,6 +13,7 @@
 #include <linux/netfilter.h>
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_dup_netdev.h>
 
 static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
 {
@@ -582,12 +582,8 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
 		.l3proto = l3proto,
 		.l4proto = l4proto,
 	};
-	struct net *net;
 
-	rtnl_lock();
-	for_each_net(net)
-		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
-	rtnl_unlock();
+	nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
 }
 
 static void nf_nat_l3proto_clean(u8 l3proto)
@@ -595,13 +591,8 @@ static void nf_nat_l3proto_clean(u8 l3proto)
 	struct nf_nat_proto_clean clean = {
 		.l3proto = l3proto,
 	};
-	struct net *net;
-
-	rtnl_lock();
 
-	for_each_net(net)
-		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
-	rtnl_unlock();
+	nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
 }
 
 /* Protocol registration. */
@@ -822,17 +813,6 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
 }
 #endif
 
-static void __net_exit nf_nat_net_exit(struct net *net)
-{
-	struct nf_nat_proto_clean clean = {};
-
-	nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
-}
-
-static struct pernet_operations nf_nat_net_ops = {
-	.exit = nf_nat_net_exit,
-};
-
 static struct nf_ct_helper_expectfn follow_master_nat = {
 	.name		= "nat-follow-master",
 	.expectfn	= nf_nat_follow_master,
@@ -853,10 +833,6 @@ static int __init nf_nat_init(void)
 		return ret;
 	}
 
-	ret = register_pernet_subsys(&nf_nat_net_ops);
-	if (ret < 0)
-		goto cleanup_extend;
-
 	nf_ct_helper_expectfn_register(&follow_master_nat);
 
 	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
@@ -867,18 +843,15 @@ static int __init nf_nat_init(void)
 	RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
 #endif
 	return 0;
-
-cleanup_extend:
-	rhltable_destroy(&nf_nat_bysource_table);
-	nf_ct_extend_unregister(&nat_extend);
-	return ret;
 }
 
 static void __exit nf_nat_cleanup(void)
 {
+	struct nf_nat_proto_clean clean = {};
 	unsigned int i;
 
-	unregister_pernet_subsys(&nf_nat_net_ops);
+	nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);
+
 	nf_ct_extend_unregister(&nat_extend);
 	nf_ct_helper_expectfn_unregister(&follow_master_nat);
 	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
net/netfilter/nf_tables_api.c

@@ -13,6 +13,7 @@
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/vmalloc.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nf_tables.h>

@@ -386,7 +387,7 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table)
        return ++table->hgenerator;
}

static const struct nf_chain_type *chain_type[AF_MAX][NFT_CHAIN_T_MAX];
static const struct nf_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX];

static const struct nf_chain_type *
__nf_tables_chain_type_lookup(int family, const struct nlattr *nla)

@@ -534,7 +535,8 @@ done:

static int nf_tables_gettable(struct net *net, struct sock *nlsk,
                              struct sk_buff *skb, const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
                              const struct nlattr * const nla[],
                              struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u8 genmask = nft_genmask_cur(net);

@@ -677,7 +679,8 @@ err:

static int nf_tables_newtable(struct net *net, struct sock *nlsk,
                              struct sk_buff *skb, const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
                              const struct nlattr * const nla[],
                              struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u8 genmask = nft_genmask_next(net);

@@ -830,7 +833,8 @@ out:

static int nf_tables_deltable(struct net *net, struct sock *nlsk,
                              struct sk_buff *skb, const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
                              const struct nlattr * const nla[],
                              struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u8 genmask = nft_genmask_next(net);

@@ -869,6 +873,9 @@ int nft_register_chain_type(const struct nf_chain_type *ctype)
{
        int err = 0;

        if (WARN_ON(ctype->family >= NFPROTO_NUMPROTO))
                return -EINVAL;

        nfnl_lock(NFNL_SUBSYS_NFTABLES);
        if (chain_type[ctype->family][ctype->type] != NULL) {
                err = -EBUSY;

@@ -1123,7 +1130,8 @@ done:

static int nf_tables_getchain(struct net *net, struct sock *nlsk,
                              struct sk_buff *skb, const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
                              const struct nlattr * const nla[],
                              struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u8 genmask = nft_genmask_cur(net);

@@ -1319,7 +1327,8 @@ static void nft_chain_release_hook(struct nft_chain_hook *hook)

static int nf_tables_newchain(struct net *net, struct sock *nlsk,
                              struct sk_buff *skb, const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
                              const struct nlattr * const nla[],
                              struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        const struct nlattr * uninitialized_var(name);

@@ -1557,7 +1566,8 @@ err1:

static int nf_tables_delchain(struct net *net, struct sock *nlsk,
                              struct sk_buff *skb, const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
                              const struct nlattr * const nla[],
                              struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u8 genmask = nft_genmask_next(net);

@@ -2038,7 +2048,8 @@ static int nf_tables_dump_rules_done(struct netlink_callback *cb)

static int nf_tables_getrule(struct net *net, struct sock *nlsk,
                             struct sk_buff *skb, const struct nlmsghdr *nlh,
                             const struct nlattr * const nla[])
                             const struct nlattr * const nla[],
                             struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u8 genmask = nft_genmask_cur(net);

@@ -2131,7 +2142,8 @@ static struct nft_expr_info *info;

static int nf_tables_newrule(struct net *net, struct sock *nlsk,
                             struct sk_buff *skb, const struct nlmsghdr *nlh,
                             const struct nlattr * const nla[])
                             const struct nlattr * const nla[],
                             struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u8 genmask = nft_genmask_next(net);

@@ -2313,7 +2325,8 @@ static struct nft_rule *nft_rule_lookup_byid(const struct net *net,

static int nf_tables_delrule(struct net *net, struct sock *nlsk,
                             struct sk_buff *skb, const struct nlmsghdr *nlh,
                             const struct nlattr * const nla[])
                             const struct nlattr * const nla[],
                             struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u8 genmask = nft_genmask_next(net);

@@ -2377,64 +2390,77 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
 * Sets
 */

static LIST_HEAD(nf_tables_set_ops);
static LIST_HEAD(nf_tables_set_types);

int nft_register_set(struct nft_set_ops *ops)
int nft_register_set(struct nft_set_type *type)
{
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
        list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
        list_add_tail_rcu(&type->list, &nf_tables_set_types);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
}
EXPORT_SYMBOL_GPL(nft_register_set);

void nft_unregister_set(struct nft_set_ops *ops)
void nft_unregister_set(struct nft_set_type *type)
{
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
        list_del_rcu(&ops->list);
        list_del_rcu(&type->list);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_set);

#define NFT_SET_FEATURES (NFT_SET_INTERVAL | NFT_SET_MAP | \
                          NFT_SET_TIMEOUT | NFT_SET_OBJECT)

static bool nft_set_ops_candidate(const struct nft_set_ops *ops, u32 flags)
{
        return (flags & ops->features) == (flags & NFT_SET_FEATURES);
}

/*
 * Select a set implementation based on the data characteristics and the
 * given policy. The total memory use might not be known if no size is
 * given, in that case the amount of memory per element is used.
 */
static const struct nft_set_ops *
nft_select_set_ops(const struct nlattr * const nla[],
nft_select_set_ops(const struct nft_ctx *ctx,
                   const struct nlattr * const nla[],
                   const struct nft_set_desc *desc,
                   enum nft_set_policies policy)
{
        const struct nft_set_ops *ops, *bops;
        struct nft_set_estimate est, best;
        u32 features;
        const struct nft_set_type *type;
        u32 flags = 0;

#ifdef CONFIG_MODULES
        if (list_empty(&nf_tables_set_ops)) {
        if (list_empty(&nf_tables_set_types)) {
                nfnl_unlock(NFNL_SUBSYS_NFTABLES);
                request_module("nft-set");
                nfnl_lock(NFNL_SUBSYS_NFTABLES);
                if (!list_empty(&nf_tables_set_ops))
                if (!list_empty(&nf_tables_set_types))
                        return ERR_PTR(-EAGAIN);
        }
#endif
        features = 0;
        if (nla[NFTA_SET_FLAGS] != NULL) {
                features = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
                features &= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_TIMEOUT |
                            NFT_SET_OBJECT;
        }
        if (nla[NFTA_SET_FLAGS] != NULL)
                flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));

        bops = NULL;
        best.size = ~0;
        best.lookup = ~0;
        best.space = ~0;

        list_for_each_entry(ops, &nf_tables_set_ops, list) {
                if ((ops->features & features) != features)
        list_for_each_entry(type, &nf_tables_set_types, list) {
                if (!type->select_ops)
                        ops = type->ops;
                else
                        ops = type->select_ops(ctx, desc, flags);
                if (!ops)
                        continue;
                if (!ops->estimate(desc, features, &est))

                if (!nft_set_ops_candidate(ops, flags))
                        continue;
                if (!ops->estimate(desc, flags, &est))
                        continue;

                switch (policy) {

@@ -2465,10 +2491,10 @@ nft_select_set_ops(const struct nlattr * const nla[],
                        break;
                }

                if (!try_module_get(ops->owner))
                if (!try_module_get(type->owner))
                        continue;
                if (bops != NULL)
                        module_put(bops->owner);
                        module_put(bops->type->owner);

                bops = ops;
                best = est;
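The reworked selection loop above asks each registered type for candidate ops (via select_ops() when present) and then filters on the user-requested flags: a backend qualifies only if it offers every requested feature bit. A standalone sketch of that mask test, with illustrative flag names:

#include <stdint.h>
#include <stdio.h>

/* Illustrative feature bits mirroring the NFT_SET_* flags. */
#define SET_INTERVAL (1u << 0)
#define SET_MAP      (1u << 1)
#define SET_TIMEOUT  (1u << 2)
#define SET_OBJECT   (1u << 3)
#define SET_FEATURES (SET_INTERVAL | SET_MAP | SET_TIMEOUT | SET_OBJECT)

/* A backend qualifies iff it offers every requested feature bit. */
static int candidate(uint32_t ops_features, uint32_t req_flags)
{
        return (req_flags & ops_features) == (req_flags & SET_FEATURES);
}

int main(void)
{
        /* hash-like backend: map, object and timeout, but no intervals */
        uint32_t hash = SET_MAP | SET_OBJECT | SET_TIMEOUT;

        printf("%d\n", candidate(hash, SET_MAP));                /* 1 */
        printf("%d\n", candidate(hash, SET_INTERVAL | SET_MAP)); /* 0 */
        return 0;
}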
@@ -2816,7 +2842,8 @@ static int nf_tables_dump_sets_done(struct netlink_callback *cb)

static int nf_tables_getset(struct net *net, struct sock *nlsk,
                            struct sk_buff *skb, const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[])
                            const struct nlattr * const nla[],
                            struct netlink_ext_ack *extack)
{
        u8 genmask = nft_genmask_cur(net);
        const struct nft_set *set;

@@ -2892,7 +2919,8 @@ static int nf_tables_set_desc_parse(const struct nft_ctx *ctx,

static int nf_tables_newset(struct net *net, struct sock *nlsk,
                            struct sk_buff *skb, const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[])
                            const struct nlattr * const nla[],
                            struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u8 genmask = nft_genmask_next(net);

@@ -3029,7 +3057,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
                if (!(nlh->nlmsg_flags & NLM_F_CREATE))
                        return -ENOENT;

                ops = nft_select_set_ops(nla, &desc, policy);
                ops = nft_select_set_ops(&ctx, nla, &desc, policy);
                if (IS_ERR(ops))
                        return PTR_ERR(ops);

@@ -3039,12 +3067,13 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,

        size = 0;
        if (ops->privsize != NULL)
                size = ops->privsize(nla);
                size = ops->privsize(nla, &desc);

        err = -ENOMEM;
        set = kzalloc(sizeof(*set) + size + udlen, GFP_KERNEL);
        if (set == NULL)
        set = kvzalloc(sizeof(*set) + size + udlen, GFP_KERNEL);
        if (!set) {
                err = -ENOMEM;
                goto err1;
        }

        nla_strlcpy(name, nla[NFTA_SET_NAME], sizeof(set->name));
        err = nf_tables_set_alloc_name(&ctx, set, name);

@@ -3087,17 +3116,17 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
err3:
        ops->destroy(set);
err2:
        kfree(set);
        kvfree(set);
err1:
        module_put(ops->owner);
        module_put(ops->type->owner);
        return err;
}

static void nft_set_destroy(struct nft_set *set)
{
        set->ops->destroy(set);
        module_put(set->ops->owner);
        kfree(set);
        module_put(set->ops->type->owner);
        kvfree(set);
}

static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)

@@ -3109,7 +3138,8 @@ static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set

static int nf_tables_delset(struct net *net, struct sock *nlsk,
                            struct sk_buff *skb, const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[])
                            const struct nlattr * const nla[],
                            struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u8 genmask = nft_genmask_next(net);

@@ -3469,7 +3499,8 @@ static int nf_tables_dump_set_done(struct netlink_callback *cb)

static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
                                struct sk_buff *skb, const struct nlmsghdr *nlh,
                                const struct nlattr * const nla[])
                                const struct nlattr * const nla[],
                                struct netlink_ext_ack *extack)
{
        u8 genmask = nft_genmask_cur(net);
        const struct nft_set *set;

@@ -3870,7 +3901,8 @@ err1:

static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
                                struct sk_buff *skb, const struct nlmsghdr *nlh,
                                const struct nlattr * const nla[])
                                const struct nlattr * const nla[],
                                struct netlink_ext_ack *extack)
{
        u8 genmask = nft_genmask_next(net);
        const struct nlattr *attr;

@@ -4067,7 +4099,8 @@ err1:

static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
                                struct sk_buff *skb, const struct nlmsghdr *nlh,
                                const struct nlattr * const nla[])
                                const struct nlattr * const nla[],
                                struct netlink_ext_ack *extack)
{
        u8 genmask = nft_genmask_next(net);
        const struct nlattr *attr;

@@ -4277,7 +4310,8 @@ static const struct nft_object_type *nft_obj_type_get(u32 objtype)

static int nf_tables_newobj(struct net *net, struct sock *nlsk,
                            struct sk_buff *skb, const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[])
                            const struct nlattr * const nla[],
                            struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        const struct nft_object_type *type;

@@ -4471,7 +4505,8 @@ nft_obj_filter_alloc(const struct nlattr * const nla[])

static int nf_tables_getobj(struct net *net, struct sock *nlsk,
                            struct sk_buff *skb, const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[])
                            const struct nlattr * const nla[],
                            struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u8 genmask = nft_genmask_cur(net);

@@ -4549,8 +4584,9 @@ static void nft_obj_destroy(struct nft_object *obj)
}

static int nf_tables_delobj(struct net *net, struct sock *nlsk,
                            struct sk_buff *skb, const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[])
                            struct sk_buff *skb, const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[],
                            struct netlink_ext_ack *extack)
{
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u8 genmask = nft_genmask_next(net);

@@ -4680,7 +4716,8 @@ err:

static int nf_tables_getgen(struct net *net, struct sock *nlsk,
                            struct sk_buff *skb, const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[])
                            const struct nlattr * const nla[],
                            struct netlink_ext_ack *extack)
{
        struct sk_buff *skb2;
        int err;
net/netfilter/nfnetlink.c

@@ -201,7 +201,8 @@ replay:

        if (nc->call_rcu) {
                err = nc->call_rcu(net, net->nfnl, skb, nlh,
                                   (const struct nlattr **)cda);
                                   (const struct nlattr **)cda,
                                   extack);
                rcu_read_unlock();
        } else {
                rcu_read_unlock();

@@ -211,7 +212,8 @@ replay:
                        err = -EAGAIN;
                else if (nc->call)
                        err = nc->call(net, net->nfnl, skb, nlh,
                                       (const struct nlattr **)cda);
                                       (const struct nlattr **)cda,
                                       extack);
                else
                        err = -EINVAL;
                nfnl_unlock(subsys_id);

@@ -226,9 +228,11 @@ struct nfnl_err {
        struct list_head head;
        struct nlmsghdr *nlh;
        int err;
        struct netlink_ext_ack extack;
};

static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err)
static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err,
                        const struct netlink_ext_ack *extack)
{
        struct nfnl_err *nfnl_err;

@@ -238,6 +242,7 @@ static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err)

        nfnl_err->nlh = nlh;
        nfnl_err->err = err;
        nfnl_err->extack = *extack;
        list_add_tail(&nfnl_err->head, list);

        return 0;

@@ -262,7 +267,8 @@ static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
        struct nfnl_err *nfnl_err, *next;

        list_for_each_entry_safe(nfnl_err, next, err_list, head) {
                netlink_ack(skb, nfnl_err->nlh, nfnl_err->err, NULL);
                netlink_ack(skb, nfnl_err->nlh, nfnl_err->err,
                            &nfnl_err->extack);
                nfnl_err_del(nfnl_err);
        }
}

@@ -280,6 +286,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct net *net = sock_net(skb->sk);
        const struct nfnetlink_subsystem *ss;
        const struct nfnl_callback *nc;
        struct netlink_ext_ack extack;
        LIST_HEAD(err_list);
        u32 status;
        int err;

@@ -325,6 +332,7 @@ replay:
        while (skb->len >= nlmsg_total_size(0)) {
                int msglen, type;

                memset(&extack, 0, sizeof(extack));
                nlh = nlmsg_hdr(skb);
                err = 0;

@@ -384,7 +392,8 @@ replay:

                if (nc->call_batch) {
                        err = nc->call_batch(net, net->nfnl, skb, nlh,
                                             (const struct nlattr **)cda);
                                             (const struct nlattr **)cda,
                                             &extack);
                }

                /* The lock was released to autoload some module, we

@@ -402,7 +411,7 @@ ack:
                 * processed, this avoids that the same error is
                 * reported several times when replaying the batch.
                 */
                if (nfnl_err_add(&err_list, nlh, err) < 0) {
                if (nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
                        /* We failed to enqueue an error, reset the
                         * list of errors and send OOM to userspace
                         * pointing to the batch header.
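Batch processing above defers all acknowledgements until the whole batch has been replayed, so each queued error now carries a snapshot of its extack; the final netlink_ack() can then still point at the offending message. A userspace-style sketch of that queue-then-deliver pattern (all names illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct err_entry {
        int seq;                  /* which message in the batch failed */
        int err;
        char extack_msg[64];      /* snapshot taken at failure time */
        struct err_entry *next;
};

static struct err_entry *err_list;

static int err_add(int seq, int err, const char *msg)
{
        struct err_entry *e = calloc(1, sizeof(*e));

        if (!e)
                return -1;        /* caller falls back to one OOM ack */
        e->seq = seq;
        e->err = err;
        strncpy(e->extack_msg, msg, sizeof(e->extack_msg) - 1);
        e->next = err_list;
        err_list = e;
        return 0;
}

static void err_deliver(void)
{
        while (err_list) {
                struct err_entry *e = err_list;

                err_list = e->next;
                printf("msg %d: err %d (%s)\n", e->seq, e->err, e->extack_msg);
                free(e);
        }
}

int main(void)
{
        err_add(3, -22, "unknown set flags");   /* e.g. -EINVAL on message 3 */
        err_deliver();
        return 0;
}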
net/netfilter/nfnetlink_acct.c

@@ -49,7 +49,8 @@ struct nfacct_filter {

static int nfnl_acct_new(struct net *net, struct sock *nfnl,
                         struct sk_buff *skb, const struct nlmsghdr *nlh,
                         const struct nlattr * const tb[])
                         const struct nlattr * const tb[],
                         struct netlink_ext_ack *extack)
{
        struct nf_acct *nfacct, *matching = NULL;
        char *acct_name;

@@ -264,7 +265,8 @@ nfacct_filter_alloc(const struct nlattr * const attr)

static int nfnl_acct_get(struct net *net, struct sock *nfnl,
                         struct sk_buff *skb, const struct nlmsghdr *nlh,
                         const struct nlattr * const tb[])
                         const struct nlattr * const tb[],
                         struct netlink_ext_ack *extack)
{
        int ret = -ENOENT;
        struct nf_acct *cur;

@@ -343,7 +345,8 @@ static int nfnl_acct_try_del(struct nf_acct *cur)

static int nfnl_acct_del(struct net *net, struct sock *nfnl,
                         struct sk_buff *skb, const struct nlmsghdr *nlh,
                         const struct nlattr * const tb[])
                         const struct nlattr * const tb[],
                         struct netlink_ext_ack *extack)
{
        struct nf_acct *cur, *tmp;
        int ret = -ENOENT;
net/netfilter/nfnetlink_cthelper.c

@@ -398,7 +398,8 @@ nfnl_cthelper_update(const struct nlattr * const tb[],

static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
                             struct sk_buff *skb, const struct nlmsghdr *nlh,
                             const struct nlattr * const tb[])
                             const struct nlattr * const tb[],
                             struct netlink_ext_ack *extack)
{
        const char *helper_name;
        struct nf_conntrack_helper *cur, *helper = NULL;

@@ -599,7 +600,8 @@ out:

static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
                             struct sk_buff *skb, const struct nlmsghdr *nlh,
                             const struct nlattr * const tb[])
                             const struct nlattr * const tb[],
                             struct netlink_ext_ack *extack)
{
        int ret = -ENOENT;
        struct nf_conntrack_helper *cur;

@@ -666,7 +668,8 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,

static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
                             struct sk_buff *skb, const struct nlmsghdr *nlh,
                             const struct nlattr * const tb[])
                             const struct nlattr * const tb[],
                             struct netlink_ext_ack *extack)
{
        char *helper_name = NULL;
        struct nf_conntrack_helper *cur;
net/netfilter/nfnetlink_cttimeout.c

@@ -69,7 +69,8 @@ ctnl_timeout_parse_policy(void *timeouts, struct nf_conntrack_l4proto *l4proto,
static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
                                 struct sk_buff *skb,
                                 const struct nlmsghdr *nlh,
                                 const struct nlattr * const cda[])
                                 const struct nlattr * const cda[],
                                 struct netlink_ext_ack *extack)
{
        __u16 l3num;
        __u8 l4num;

@@ -239,7 +240,8 @@ ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
static int cttimeout_get_timeout(struct net *net, struct sock *ctnl,
                                 struct sk_buff *skb,
                                 const struct nlmsghdr *nlh,
                                 const struct nlattr * const cda[])
                                 const struct nlattr * const cda[],
                                 struct netlink_ext_ack *extack)
{
        int ret = -ENOENT;
        char *name;

@@ -287,49 +289,20 @@ static int cttimeout_get_timeout(struct net *net, struct sock *ctnl,
        return ret;
}

static void untimeout(struct nf_conntrack_tuple_hash *i,
                      struct ctnl_timeout *timeout)
static int untimeout(struct nf_conn *ct, void *timeout)
{
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
        struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);

        if (timeout_ext && (!timeout || timeout_ext->timeout == timeout))
                RCU_INIT_POINTER(timeout_ext->timeout, NULL);

        /* We are not intended to delete this conntrack. */
        return 0;
}

static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
{
        struct nf_conntrack_tuple_hash *h;
        const struct hlist_nulls_node *nn;
        unsigned int last_hsize;
        spinlock_t *lock;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

                spin_lock_bh(&pcpu->lock);
                hlist_nulls_for_each_entry(h, nn, &pcpu->unconfirmed, hnnode)
                        untimeout(h, timeout);
                spin_unlock_bh(&pcpu->lock);
        }

        local_bh_disable();
restart:
        last_hsize = nf_conntrack_htable_size;
        for (i = 0; i < last_hsize; i++) {
                lock = &nf_conntrack_locks[i % CONNTRACK_LOCKS];
                nf_conntrack_lock(lock);
                if (last_hsize != nf_conntrack_htable_size) {
                        spin_unlock(lock);
                        goto restart;
                }

                hlist_nulls_for_each_entry(h, nn, &nf_conntrack_hash[i], hnnode)
                        untimeout(h, timeout);
                spin_unlock(lock);
        }
        local_bh_enable();
        nf_ct_iterate_cleanup_net(net, untimeout, timeout, 0, 0);
}

/* try to delete object, fail if it is still in use. */
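For reference, the call that replaces the deleted walk has this shape; the argument meanings below are spelled out as comments, and the note about resize handling reflects the changelog rather than code shown in this hunk.

/*
 *   nf_ct_iterate_cleanup_net(net,       // namespace whose table to walk
 *                             untimeout, // called once per conntrack entry
 *                             timeout,   // opaque data passed to the callback
 *                             0, 0);     // portid/report for destroy events
 *
 * The callback returns nonzero to have the entry unlinked and freed;
 * untimeout() always returns 0 since it only clears the reference held
 * by the entry's timeout extension. Restarting the scan when the hash
 * table is resized concurrently, previously open-coded here, is handled
 * inside the iterator.
 */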
@@ -355,7 +328,8 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout)
static int cttimeout_del_timeout(struct net *net, struct sock *ctnl,
                                 struct sk_buff *skb,
                                 const struct nlmsghdr *nlh,
                                 const struct nlattr * const cda[])
                                 const struct nlattr * const cda[],
                                 struct netlink_ext_ack *extack)
{
        struct ctnl_timeout *cur, *tmp;
        int ret = -ENOENT;

@@ -386,7 +360,8 @@ static int cttimeout_del_timeout(struct net *net, struct sock *ctnl,
static int cttimeout_default_set(struct net *net, struct sock *ctnl,
                                 struct sk_buff *skb,
                                 const struct nlmsghdr *nlh,
                                 const struct nlattr * const cda[])
                                 const struct nlattr * const cda[],
                                 struct netlink_ext_ack *extack)
{
        __u16 l3num;
        __u8 l4num;

@@ -475,7 +450,8 @@ nla_put_failure:
static int cttimeout_default_get(struct net *net, struct sock *ctnl,
                                 struct sk_buff *skb,
                                 const struct nlmsghdr *nlh,
                                 const struct nlattr * const cda[])
                                 const struct nlattr * const cda[],
                                 struct netlink_ext_ack *extack)
{
        __u16 l3num;
        __u8 l4num;
net/netfilter/nfnetlink_log.c

@@ -795,7 +795,8 @@ static struct notifier_block nfulnl_rtnl_notifier = {

static int nfulnl_recv_unsupp(struct net *net, struct sock *ctnl,
                              struct sk_buff *skb, const struct nlmsghdr *nlh,
                              const struct nlattr * const nfqa[])
                              const struct nlattr * const nfqa[],
                              struct netlink_ext_ack *extack)
{
        return -ENOTSUPP;
}

@@ -818,7 +819,8 @@ static const struct nla_policy nfula_cfg_policy[NFULA_CFG_MAX+1] = {

static int nfulnl_recv_config(struct net *net, struct sock *ctnl,
                              struct sk_buff *skb, const struct nlmsghdr *nlh,
                              const struct nlattr * const nfula[])
                              const struct nlattr * const nfula[],
                              struct netlink_ext_ack *extack)
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int16_t group_num = ntohs(nfmsg->res_id);
net/netfilter/nfnetlink_queue.c

@@ -1032,7 +1032,8 @@ static int nfq_id_after(unsigned int id, unsigned int max)
static int nfqnl_recv_verdict_batch(struct net *net, struct sock *ctnl,
                                    struct sk_buff *skb,
                                    const struct nlmsghdr *nlh,
                                    const struct nlattr * const nfqa[])
                                    const struct nlattr * const nfqa[],
                                    struct netlink_ext_ack *extack)
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        struct nf_queue_entry *entry, *tmp;

@@ -1136,7 +1137,8 @@ static int nfqa_parse_bridge(struct nf_queue_entry *entry,
static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
                              struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              const struct nlattr * const nfqa[])
                              const struct nlattr * const nfqa[],
                              struct netlink_ext_ack *extack)
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);

@@ -1200,7 +1202,8 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,

static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl,
                             struct sk_buff *skb, const struct nlmsghdr *nlh,
                             const struct nlattr * const nfqa[])
                             const struct nlattr * const nfqa[],
                             struct netlink_ext_ack *extack)
{
        return -ENOTSUPP;
}

@@ -1217,7 +1220,8 @@ static const struct nf_queue_handler nfqh = {

static int nfqnl_recv_config(struct net *net, struct sock *ctnl,
                             struct sk_buff *skb, const struct nlmsghdr *nlh,
                             const struct nlattr * const nfqa[])
                             const struct nlattr * const nfqa[],
                             struct netlink_ext_ack *extack)
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
net/netfilter/nft_compat.c

@@ -530,7 +530,8 @@ nla_put_failure:

static int nfnl_compat_get(struct net *net, struct sock *nfnl,
                           struct sk_buff *skb, const struct nlmsghdr *nlh,
                           const struct nlattr * const tb[])
                           const struct nlattr * const tb[],
                           struct netlink_ext_ack *extack)
{
        int ret = 0, target;
        struct nfgenmsg *nfmsg;
net/netfilter/nft_rt.c

@@ -23,9 +23,9 @@ struct nft_rt {
        enum nft_registers      dreg:8;
};

void nft_rt_get_eval(const struct nft_expr *expr,
                     struct nft_regs *regs,
                     const struct nft_pktinfo *pkt)
static void nft_rt_get_eval(const struct nft_expr *expr,
                            struct nft_regs *regs,
                            const struct nft_pktinfo *pkt)
{
        const struct nft_rt *priv = nft_expr_priv(expr);
        const struct sk_buff *skb = pkt->skb;

@@ -72,9 +72,9 @@ const struct nla_policy nft_rt_policy[NFTA_RT_MAX + 1] = {
        [NFTA_RT_KEY]           = { .type = NLA_U32 },
};

int nft_rt_get_init(const struct nft_ctx *ctx,
                    const struct nft_expr *expr,
                    const struct nlattr * const tb[])
static int nft_rt_get_init(const struct nft_ctx *ctx,
                           const struct nft_expr *expr,
                           const struct nlattr * const tb[])
{
        struct nft_rt *priv = nft_expr_priv(expr);
        unsigned int len;

@@ -103,8 +103,8 @@ int nft_rt_get_init(const struct nft_ctx *ctx,
                                           NFT_DATA_VALUE, len);
}

int nft_rt_get_dump(struct sk_buff *skb,
                    const struct nft_expr *expr)
static int nft_rt_get_dump(struct sk_buff *skb,
                           const struct nft_expr *expr)
{
        const struct nft_rt *priv = nft_expr_priv(expr);
net/netfilter/nft_set_bitmap.c

@@ -236,7 +236,8 @@ static inline u32 nft_bitmap_total_size(u32 klen)
        return sizeof(struct nft_bitmap) + nft_bitmap_size(klen);
}

static unsigned int nft_bitmap_privsize(const struct nlattr * const nla[])
static unsigned int nft_bitmap_privsize(const struct nlattr * const nla[],
                                        const struct nft_set_desc *desc)
{
        u32 klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));

@@ -278,7 +279,9 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
        return true;
}

static struct nft_set_type nft_bitmap_type;
static struct nft_set_ops nft_bitmap_ops __read_mostly = {
        .type           = &nft_bitmap_type,
        .privsize       = nft_bitmap_privsize,
        .elemsize       = offsetof(struct nft_bitmap_elem, ext),
        .estimate       = nft_bitmap_estimate,

@@ -291,17 +294,21 @@ static struct nft_set_ops nft_bitmap_ops __read_mostly = {
        .activate       = nft_bitmap_activate,
        .lookup         = nft_bitmap_lookup,
        .walk           = nft_bitmap_walk,
};

static struct nft_set_type nft_bitmap_type __read_mostly = {
        .ops            = &nft_bitmap_ops,
        .owner          = THIS_MODULE,
};

static int __init nft_bitmap_module_init(void)
{
        return nft_register_set(&nft_bitmap_ops);
        return nft_register_set(&nft_bitmap_type);
}

static void __exit nft_bitmap_module_exit(void)
{
        nft_unregister_set(&nft_bitmap_ops);
        nft_unregister_set(&nft_bitmap_type);
}

module_init(nft_bitmap_module_init);
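The registration change above is the new type/ops split: a set backend registers one struct nft_set_type that owns the module reference and either points at fixed ops or supplies a select_ops() chooser. A standalone illustration of that dispatch, with simplified stand-in structures:

#include <stdio.h>

struct set_desc { unsigned int size; unsigned int klen; };
struct set_ops  { const char *name; };

struct set_type {
        const struct set_ops *ops;                    /* fixed ops, or */
        const struct set_ops *(*select_ops)(const struct set_desc *);
};

static const struct set_ops rhash_ops = { "resizable-hash" };
static const struct set_ops fixed_ops = { "fixed-hash" };

static const struct set_ops *hash_select(const struct set_desc *d)
{
        return d->size ? &fixed_ops : &rhash_ops; /* known size => fixed table */
}

static const struct set_type hash_type = { .select_ops = hash_select };

int main(void)
{
        struct set_desc d = { .size = 1024, .klen = 4 };
        const struct set_ops *ops = hash_type.select_ops ?
                hash_type.select_ops(&d) : hash_type.ops;

        printf("%s\n", ops->name);   /* fixed-hash */
        return 0;
}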
net/netfilter/nft_set_hash.c

@@ -22,45 +22,43 @@
#include <net/netfilter/nf_tables.h>

/* We target a hash table size of 4, element hint is 75% of final size */
#define NFT_HASH_ELEMENT_HINT 3
#define NFT_RHASH_ELEMENT_HINT 3

struct nft_hash {
struct nft_rhash {
        struct rhashtable               ht;
        struct delayed_work             gc_work;
};

struct nft_hash_elem {
struct nft_rhash_elem {
        struct rhash_head               node;
        struct nft_set_ext              ext;
};

struct nft_hash_cmp_arg {
struct nft_rhash_cmp_arg {
        const struct nft_set            *set;
        const u32                       *key;
        u8                              genmask;
};

static const struct rhashtable_params nft_hash_params;

static inline u32 nft_hash_key(const void *data, u32 len, u32 seed)
static inline u32 nft_rhash_key(const void *data, u32 len, u32 seed)
{
        const struct nft_hash_cmp_arg *arg = data;
        const struct nft_rhash_cmp_arg *arg = data;

        return jhash(arg->key, len, seed);
}

static inline u32 nft_hash_obj(const void *data, u32 len, u32 seed)
static inline u32 nft_rhash_obj(const void *data, u32 len, u32 seed)
{
        const struct nft_hash_elem *he = data;
        const struct nft_rhash_elem *he = data;

        return jhash(nft_set_ext_key(&he->ext), len, seed);
}

static inline int nft_hash_cmp(struct rhashtable_compare_arg *arg,
                               const void *ptr)
static inline int nft_rhash_cmp(struct rhashtable_compare_arg *arg,
                                const void *ptr)
{
        const struct nft_hash_cmp_arg *x = arg->key;
        const struct nft_hash_elem *he = ptr;
        const struct nft_rhash_cmp_arg *x = arg->key;
        const struct nft_rhash_elem *he = ptr;

        if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
                return 1;

@@ -71,41 +69,49 @@ static inline int nft_hash_cmp(struct rhashtable_compare_arg *arg,
        return 0;
}

static bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
                            const u32 *key, const struct nft_set_ext **ext)
static const struct rhashtable_params nft_rhash_params = {
        .head_offset            = offsetof(struct nft_rhash_elem, node),
        .hashfn                 = nft_rhash_key,
        .obj_hashfn             = nft_rhash_obj,
        .obj_cmpfn              = nft_rhash_cmp,
        .automatic_shrinking    = true,
};

static bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
                             const u32 *key, const struct nft_set_ext **ext)
{
        struct nft_hash *priv = nft_set_priv(set);
        const struct nft_hash_elem *he;
        struct nft_hash_cmp_arg arg = {
        struct nft_rhash *priv = nft_set_priv(set);
        const struct nft_rhash_elem *he;
        struct nft_rhash_cmp_arg arg = {
                .genmask = nft_genmask_cur(net),
                .set     = set,
                .key     = key,
        };

        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params);
        if (he != NULL)
                *ext = &he->ext;

        return !!he;
}

static bool nft_hash_update(struct nft_set *set, const u32 *key,
                            void *(*new)(struct nft_set *,
                                         const struct nft_expr *,
                                         struct nft_regs *regs),
                            const struct nft_expr *expr,
                            struct nft_regs *regs,
                            const struct nft_set_ext **ext)
static bool nft_rhash_update(struct nft_set *set, const u32 *key,
                             void *(*new)(struct nft_set *,
                                          const struct nft_expr *,
                                          struct nft_regs *regs),
                             const struct nft_expr *expr,
                             struct nft_regs *regs,
                             const struct nft_set_ext **ext)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he, *prev;
        struct nft_hash_cmp_arg arg = {
        struct nft_rhash *priv = nft_set_priv(set);
        struct nft_rhash_elem *he, *prev;
        struct nft_rhash_cmp_arg arg = {
                .genmask = NFT_GENMASK_ANY,
                .set     = set,
                .key     = key,
        };

        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params);
        if (he != NULL)
                goto out;

@@ -114,7 +120,7 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key,
                goto err1;

        prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
                                                nft_hash_params);
                                                nft_rhash_params);
        if (IS_ERR(prev))
                goto err2;

@@ -134,21 +140,21 @@ err1:
        return false;
}

static int nft_hash_insert(const struct net *net, const struct nft_set *set,
                           const struct nft_set_elem *elem,
                           struct nft_set_ext **ext)
static int nft_rhash_insert(const struct net *net, const struct nft_set *set,
                            const struct nft_set_elem *elem,
                            struct nft_set_ext **ext)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he = elem->priv;
        struct nft_hash_cmp_arg arg = {
        struct nft_rhash *priv = nft_set_priv(set);
        struct nft_rhash_elem *he = elem->priv;
        struct nft_rhash_cmp_arg arg = {
                .genmask = nft_genmask_next(net),
                .set     = set,
                .key     = elem->key.val.data,
        };
        struct nft_hash_elem *prev;
        struct nft_rhash_elem *prev;

        prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
                                                nft_hash_params);
                                                nft_rhash_params);
        if (IS_ERR(prev))
                return PTR_ERR(prev);
        if (prev) {

@@ -158,19 +164,19 @@ static int nft_hash_insert(const struct net *net, const struct nft_set *set,
        return 0;
}

static void nft_hash_activate(const struct net *net, const struct nft_set *set,
                              const struct nft_set_elem *elem)
static void nft_rhash_activate(const struct net *net, const struct nft_set *set,
                               const struct nft_set_elem *elem)
{
        struct nft_hash_elem *he = elem->priv;
        struct nft_rhash_elem *he = elem->priv;

        nft_set_elem_change_active(net, set, &he->ext);
        nft_set_elem_clear_busy(&he->ext);
}

static bool nft_hash_flush(const struct net *net,
                           const struct nft_set *set, void *priv)
static bool nft_rhash_flush(const struct net *net,
                            const struct nft_set *set, void *priv)
{
        struct nft_hash_elem *he = priv;
        struct nft_rhash_elem *he = priv;

        if (!nft_set_elem_mark_busy(&he->ext) ||
            !nft_is_active(net, &he->ext)) {

@@ -180,22 +186,22 @@ static bool nft_hash_flush(const struct net *net,
        return false;
}

static void *nft_hash_deactivate(const struct net *net,
                                 const struct nft_set *set,
                                 const struct nft_set_elem *elem)
static void *nft_rhash_deactivate(const struct net *net,
                                  const struct nft_set *set,
                                  const struct nft_set_elem *elem)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he;
        struct nft_hash_cmp_arg arg = {
        struct nft_rhash *priv = nft_set_priv(set);
        struct nft_rhash_elem *he;
        struct nft_rhash_cmp_arg arg = {
                .genmask = nft_genmask_next(net),
                .set     = set,
                .key     = elem->key.val.data,
        };

        rcu_read_lock();
        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params);
        if (he != NULL &&
            !nft_hash_flush(net, set, he))
            !nft_rhash_flush(net, set, he))
                he = NULL;

        rcu_read_unlock();

@@ -203,21 +209,21 @@ static void *nft_hash_deactivate(const struct net *net,
        return he;
}

static void nft_hash_remove(const struct net *net,
                            const struct nft_set *set,
                            const struct nft_set_elem *elem)
static void nft_rhash_remove(const struct net *net,
                             const struct nft_set *set,
                             const struct nft_set_elem *elem)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he = elem->priv;
        struct nft_rhash *priv = nft_set_priv(set);
        struct nft_rhash_elem *he = elem->priv;

        rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
        rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params);
}

static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
                          struct nft_set_iter *iter)
static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
                           struct nft_set_iter *iter)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he;
        struct nft_rhash *priv = nft_set_priv(set);
        struct nft_rhash_elem *he;
        struct rhashtable_iter hti;
        struct nft_set_elem elem;
        int err;

@@ -266,16 +272,16 @@ out:
        rhashtable_walk_exit(&hti);
}

static void nft_hash_gc(struct work_struct *work)
static void nft_rhash_gc(struct work_struct *work)
{
        struct nft_set *set;
        struct nft_hash_elem *he;
        struct nft_hash *priv;
        struct nft_rhash_elem *he;
        struct nft_rhash *priv;
        struct nft_set_gc_batch *gcb = NULL;
        struct rhashtable_iter hti;
        int err;

        priv = container_of(work, struct nft_hash, gc_work.work);
        priv = container_of(work, struct nft_rhash, gc_work.work);
        set  = nft_set_container_of(priv);

        err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);

@@ -301,7 +307,7 @@ static void nft_hash_gc(struct work_struct *work)
                gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
                if (gcb == NULL)
                        goto out;
                rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
                rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params);
                atomic_dec(&set->nelems);
                nft_set_gc_batch_add(gcb, he);
        }

@@ -315,82 +321,290 @@ schedule:
                           nft_set_gc_interval(set));
}

static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
static unsigned int nft_rhash_privsize(const struct nlattr * const nla[],
                                       const struct nft_set_desc *desc)
{
        return sizeof(struct nft_hash);
        return sizeof(struct nft_rhash);
}

static const struct rhashtable_params nft_hash_params = {
        .head_offset            = offsetof(struct nft_hash_elem, node),
        .hashfn                 = nft_hash_key,
        .obj_hashfn             = nft_hash_obj,
        .obj_cmpfn              = nft_hash_cmp,
        .automatic_shrinking    = true,
};

static int nft_hash_init(const struct nft_set *set,
                         const struct nft_set_desc *desc,
                         const struct nlattr * const tb[])
static int nft_rhash_init(const struct nft_set *set,
                          const struct nft_set_desc *desc,
                          const struct nlattr * const tb[])
{
        struct nft_hash *priv = nft_set_priv(set);
        struct rhashtable_params params = nft_hash_params;
        struct nft_rhash *priv = nft_set_priv(set);
        struct rhashtable_params params = nft_rhash_params;
        int err;

        params.nelem_hint = desc->size ?: NFT_HASH_ELEMENT_HINT;
        params.nelem_hint = desc->size ?: NFT_RHASH_ELEMENT_HINT;
        params.key_len    = set->klen;

        err = rhashtable_init(&priv->ht, &params);
        if (err < 0)
                return err;

        INIT_DEFERRABLE_WORK(&priv->gc_work, nft_hash_gc);
        INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rhash_gc);
        if (set->flags & NFT_SET_TIMEOUT)
                queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
                                   nft_set_gc_interval(set));
        return 0;
}

static void nft_hash_elem_destroy(void *ptr, void *arg)
static void nft_rhash_elem_destroy(void *ptr, void *arg)
{
        nft_set_elem_destroy(arg, ptr, true);
}

static void nft_hash_destroy(const struct nft_set *set)
static void nft_rhash_destroy(const struct nft_set *set)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_rhash *priv = nft_set_priv(set);

        cancel_delayed_work_sync(&priv->gc_work);
        rhashtable_free_and_destroy(&priv->ht, nft_hash_elem_destroy,
        rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
                                    (void *)set);
}

static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
                              struct nft_set_estimate *est)
static u32 nft_hash_buckets(u32 size)
{
        unsigned int esize;

        esize = sizeof(struct nft_hash_elem);
        if (desc->size) {
                est->size = sizeof(struct nft_hash) +
                            roundup_pow_of_two(desc->size * 4 / 3) *
                            sizeof(struct nft_hash_elem *) +
                            desc->size * esize;
        } else {
                /* Resizing happens when the load drops below 30% or goes
                 * above 75%. The average of 52.5% load (approximated by 50%)
                 * is used for the size estimation of the hash buckets,
                 * meaning we calculate two buckets per element.
                 */
                est->size = esize + 2 * sizeof(struct nft_hash_elem *);
        }
        return roundup_pow_of_two(size * 4 / 3);
}
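nft_hash_buckets() above provisions the fixed table at roughly 75% load: the bucket count is the next power of two at or above size * 4 / 3. A standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Plain stand-in for the kernel's roundup_pow_of_two() (32-bit). */
static uint32_t roundup_pow2(uint32_t v)
{
        v--;
        v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
        v |= v >> 8;  v |= v >> 16;
        return v + 1;
}

/* buckets = next pow2 >= size * 4/3, i.e. a full set sits at ~75%
 * load before the power-of-two rounding. */
static uint32_t hash_buckets(uint32_t size)
{
        return roundup_pow2(size * 4 / 3);
}

int main(void)
{
        printf("%u\n", hash_buckets(1000)); /* 1000*4/3 = 1333 -> 2048 */
        printf("%u\n", hash_buckets(768));  /*  768*4/3 = 1024 -> 1024 */
        return 0;
}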
static bool nft_rhash_estimate(const struct nft_set_desc *desc, u32 features,
                               struct nft_set_estimate *est)
{
        est->size   = ~0;
        est->lookup = NFT_SET_CLASS_O_1;
        est->space  = NFT_SET_CLASS_O_N;

        return true;
}

struct nft_hash {
        u32                             seed;
        u32                             buckets;
        struct hlist_head               table[];
};

struct nft_hash_elem {
        struct hlist_node               node;
        struct nft_set_ext              ext;
};

static bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
                            const u32 *key, const struct nft_set_ext **ext)
{
        struct nft_hash *priv = nft_set_priv(set);
        u8 genmask = nft_genmask_cur(net);
        const struct nft_hash_elem *he;
        u32 hash;

        hash = jhash(key, set->klen, priv->seed);
        hash = reciprocal_scale(hash, priv->buckets);
        hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
                if (!memcmp(nft_set_ext_key(&he->ext), key, set->klen) &&
                    nft_set_elem_active(&he->ext, genmask)) {
                        *ext = &he->ext;
                        return true;
                }
        }
        return false;
}

/* nft_hash_select_ops() makes sure key size can be either 2 or 4 bytes. */
static inline u32 nft_hash_key(const u32 *key, u32 klen)
{
        if (klen == 4)
                return *key;

        return *(u16 *)key;
}

static bool nft_hash_lookup_fast(const struct net *net,
                                 const struct nft_set *set,
                                 const u32 *key, const struct nft_set_ext **ext)
{
        struct nft_hash *priv = nft_set_priv(set);
        u8 genmask = nft_genmask_cur(net);
        const struct nft_hash_elem *he;
        u32 hash, k1, k2;

        k1 = nft_hash_key(key, set->klen);
        hash = jhash_1word(k1, priv->seed);
        hash = reciprocal_scale(hash, priv->buckets);
        hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
                k2 = nft_hash_key(nft_set_ext_key(&he->ext)->data, set->klen);
                if (k1 == k2 &&
                    nft_set_elem_active(&he->ext, genmask)) {
                        *ext = &he->ext;
                        return true;
                }
        }
        return false;
}

static int nft_hash_insert(const struct net *net, const struct nft_set *set,
                           const struct nft_set_elem *elem,
                           struct nft_set_ext **ext)
{
        struct nft_hash_elem *this = elem->priv, *he;
        struct nft_hash *priv = nft_set_priv(set);
        u8 genmask = nft_genmask_next(net);
        u32 hash;

        hash = jhash(nft_set_ext_key(&this->ext), set->klen, priv->seed);
        hash = reciprocal_scale(hash, priv->buckets);
        hlist_for_each_entry(he, &priv->table[hash], node) {
                if (!memcmp(nft_set_ext_key(&this->ext),
                            nft_set_ext_key(&he->ext), set->klen) &&
                    nft_set_elem_active(&he->ext, genmask)) {
                        *ext = &he->ext;
                        return -EEXIST;
                }
        }
        hlist_add_head_rcu(&this->node, &priv->table[hash]);
        return 0;
}

static void nft_hash_activate(const struct net *net, const struct nft_set *set,
                              const struct nft_set_elem *elem)
{
        struct nft_hash_elem *he = elem->priv;

        nft_set_elem_change_active(net, set, &he->ext);
}

static bool nft_hash_flush(const struct net *net,
                           const struct nft_set *set, void *priv)
{
        struct nft_hash_elem *he = priv;

        nft_set_elem_change_active(net, set, &he->ext);
        return true;
}

static void *nft_hash_deactivate(const struct net *net,
                                 const struct nft_set *set,
                                 const struct nft_set_elem *elem)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *this = elem->priv, *he;
        u8 genmask = nft_genmask_next(net);
        u32 hash;

        hash = jhash(nft_set_ext_key(&this->ext), set->klen, priv->seed);
        hash = reciprocal_scale(hash, priv->buckets);
        hlist_for_each_entry(he, &priv->table[hash], node) {
                if (!memcmp(nft_set_ext_key(&this->ext), &elem->key.val,
                            set->klen) ||
                    nft_set_elem_active(&he->ext, genmask)) {
                        nft_set_elem_change_active(net, set, &he->ext);
                        return he;
                }
        }
        return NULL;
}

static void nft_hash_remove(const struct net *net,
                            const struct nft_set *set,
                            const struct nft_set_elem *elem)
{
        struct nft_hash_elem *he = elem->priv;

        hlist_del_rcu(&he->node);
}

static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
                          struct nft_set_iter *iter)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he;
        struct nft_set_elem elem;
        int i;

        for (i = 0; i < priv->buckets; i++) {
                hlist_for_each_entry_rcu(he, &priv->table[i], node) {
                        if (iter->count < iter->skip)
                                goto cont;
                        if (!nft_set_elem_active(&he->ext, iter->genmask))
                                goto cont;

                        elem.priv = he;

                        iter->err = iter->fn(ctx, set, iter, &elem);
                        if (iter->err < 0)
                                return;
cont:
                        iter->count++;
                }
        }
}

static unsigned int nft_hash_privsize(const struct nlattr * const nla[],
                                      const struct nft_set_desc *desc)
{
        return sizeof(struct nft_hash) +
               nft_hash_buckets(desc->size) * sizeof(struct hlist_head);
}

static int nft_hash_init(const struct nft_set *set,
                         const struct nft_set_desc *desc,
                         const struct nlattr * const tb[])
{
        struct nft_hash *priv = nft_set_priv(set);

        priv->buckets = nft_hash_buckets(desc->size);
        get_random_bytes(&priv->seed, sizeof(priv->seed));

        return 0;
}

static void nft_hash_destroy(const struct nft_set *set)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he;
        struct hlist_node *next;
        int i;

        for (i = 0; i < priv->buckets; i++) {
                hlist_for_each_entry_safe(he, next, &priv->table[i], node) {
                        hlist_del_rcu(&he->node);
                        nft_set_elem_destroy(set, he, true);
                }
        }
}

static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
                              struct nft_set_estimate *est)
{
        est->size = sizeof(struct nft_hash) +
                    nft_hash_buckets(desc->size) * sizeof(struct hlist_head) +
                    desc->size * sizeof(struct nft_hash_elem);
        est->lookup = NFT_SET_CLASS_O_1;
        est->space  = NFT_SET_CLASS_O_N;

        return true;
}

static struct nft_set_type nft_hash_type;
static struct nft_set_ops nft_rhash_ops __read_mostly = {
        .type           = &nft_hash_type,
        .privsize       = nft_rhash_privsize,
        .elemsize       = offsetof(struct nft_rhash_elem, ext),
        .estimate       = nft_rhash_estimate,
        .init           = nft_rhash_init,
        .destroy        = nft_rhash_destroy,
        .insert         = nft_rhash_insert,
        .activate       = nft_rhash_activate,
        .deactivate     = nft_rhash_deactivate,
        .flush          = nft_rhash_flush,
        .remove         = nft_rhash_remove,
        .lookup         = nft_rhash_lookup,
        .update         = nft_rhash_update,
        .walk           = nft_rhash_walk,
        .features       = NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
};

static struct nft_set_ops nft_hash_ops __read_mostly = {
        .type           = &nft_hash_type,
        .privsize       = nft_hash_privsize,
        .elemsize       = offsetof(struct nft_hash_elem, ext),
        .estimate       = nft_hash_estimate,

@@ -402,20 +616,57 @@ static struct nft_set_ops nft_hash_ops __read_mostly = {
        .flush          = nft_hash_flush,
        .remove         = nft_hash_remove,
        .lookup         = nft_hash_lookup,
        .update         = nft_hash_update,
        .walk           = nft_hash_walk,
        .features       = NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
        .features       = NFT_SET_MAP | NFT_SET_OBJECT,
};

static struct nft_set_ops nft_hash_fast_ops __read_mostly = {
        .type           = &nft_hash_type,
        .privsize       = nft_hash_privsize,
        .elemsize       = offsetof(struct nft_hash_elem, ext),
        .estimate       = nft_hash_estimate,
        .init           = nft_hash_init,
        .destroy        = nft_hash_destroy,
        .insert         = nft_hash_insert,
        .activate       = nft_hash_activate,
        .deactivate     = nft_hash_deactivate,
        .flush          = nft_hash_flush,
        .remove         = nft_hash_remove,
        .lookup         = nft_hash_lookup_fast,
        .walk           = nft_hash_walk,
        .features       = NFT_SET_MAP | NFT_SET_OBJECT,
};

static const struct nft_set_ops *
nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc,
                    u32 flags)
{
        if (desc->size) {
                switch (desc->klen) {
                case 2:
                case 4:
                        return &nft_hash_fast_ops;
                default:
                        return &nft_hash_ops;
                }
        }

        return &nft_rhash_ops;
}
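nft_hash_select_ops() above only hands out the fast ops when the set has a declared size and a 2- or 4-byte key; nft_hash_lookup_fast() can then hash the key as one scalar with jhash_1word() and compare scalars instead of calling memcmp(). A simplified, standalone sketch of that key-loading trick (illustrative, not the kernel helper):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Load a 2- or 4-byte key as a single scalar so hashing and equality
 * tests need no memcmp(). klen is guaranteed to be 2 or 4 here. */
static uint32_t load_key(const void *key, uint32_t klen)
{
        if (klen == 4) {
                uint32_t k;

                memcpy(&k, key, 4);
                return k;
        }
        uint16_t k;

        memcpy(&k, key, 2);
        return k;
}

int main(void)
{
        uint16_t port = 443;
        uint32_t addr = 0x0a000001;     /* 10.0.0.1, host order */

        printf("%u %u\n", load_key(&port, 2), load_key(&addr, 4));
        return 0;
}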
static struct nft_set_type nft_hash_type __read_mostly = {
        .select_ops     = nft_hash_select_ops,
        .owner          = THIS_MODULE,
};

static int __init nft_hash_module_init(void)
{
        return nft_register_set(&nft_hash_ops);
        return nft_register_set(&nft_hash_type);
}

static void __exit nft_hash_module_exit(void)
{
        nft_unregister_set(&nft_hash_ops);
        nft_unregister_set(&nft_hash_type);
}

module_init(nft_hash_module_init);
net/netfilter/nft_set_rbtree.c

@@ -251,7 +251,8 @@ cont:
        read_unlock_bh(&priv->lock);
}

static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[],
                                        const struct nft_set_desc *desc)
{
        return sizeof(struct nft_rbtree);
}

@@ -283,13 +284,11 @@ static void nft_rbtree_destroy(const struct nft_set *set)
static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
                                struct nft_set_estimate *est)
{
        unsigned int nsize;

        nsize = sizeof(struct nft_rbtree_elem);
        if (desc->size)
                est->size = sizeof(struct nft_rbtree) + desc->size * nsize;
                est->size = sizeof(struct nft_rbtree) +
                            desc->size * sizeof(struct nft_rbtree_elem);
        else
                est->size = nsize;
                est->size = ~0;

        est->lookup = NFT_SET_CLASS_O_LOG_N;
        est->space  = NFT_SET_CLASS_O_N;

@@ -297,7 +296,9 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
        return true;
}

static struct nft_set_type nft_rbtree_type;
static struct nft_set_ops nft_rbtree_ops __read_mostly = {
        .type           = &nft_rbtree_type,
        .privsize       = nft_rbtree_privsize,
        .elemsize       = offsetof(struct nft_rbtree_elem, ext),
        .estimate       = nft_rbtree_estimate,

@@ -311,17 +312,21 @@ static struct nft_set_ops nft_rbtree_ops __read_mostly = {
        .lookup         = nft_rbtree_lookup,
        .walk           = nft_rbtree_walk,
        .features       = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT,
};

static struct nft_set_type nft_rbtree_type __read_mostly = {
        .ops            = &nft_rbtree_ops,
        .owner          = THIS_MODULE,
};

static int __init nft_rbtree_module_init(void)
{
        return nft_register_set(&nft_rbtree_ops);
        return nft_register_set(&nft_rbtree_type);
}

static void __exit nft_rbtree_module_exit(void)
{
        nft_unregister_set(&nft_rbtree_ops);
        nft_unregister_set(&nft_rbtree_type);
}

module_init(nft_rbtree_module_init);
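Across the backends, the estimate callbacks now report est->size = ~0 when no element count was given, meaning "total size unknown"; selection then has to lean on the lookup and per-element space classes instead. One plausible tie-break order is sketched below; the real policy in nft_select_set_ops() depends on the requested NFT_SET_POL_* policy, so treat this as illustrative only.

#include <stdint.h>
#include <stdio.h>

struct estimate { uint32_t size; int lookup; int space; };

/* Illustrative comparison: prefer a better lookup class, then a better
 * space class, then a smaller concrete size; ~0 (unknown) never wins
 * the size comparison. */
static int better(const struct estimate *a, const struct estimate *b)
{
        if (a->lookup != b->lookup)
                return a->lookup < b->lookup;
        if (a->space != b->space)
                return a->space < b->space;
        return a->size < b->size;
}

int main(void)
{
        struct estimate hash = { ~0u, 0, 1 };  /* O(1) lookup     */
        struct estimate tree = { ~0u, 1, 1 };  /* O(log N) lookup */

        printf("%d\n", better(&hash, &tree));  /* 1: hash preferred */
        return 0;
}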
net/netfilter/xt_osf.c

@@ -63,7 +63,8 @@ static const struct nla_policy xt_osf_policy[OSF_ATTR_MAX + 1] = {

static int xt_osf_add_callback(struct net *net, struct sock *ctnl,
                               struct sk_buff *skb, const struct nlmsghdr *nlh,
                               const struct nlattr * const osf_attrs[])
                               const struct nlattr * const osf_attrs[],
                               struct netlink_ext_ack *extack)
{
        struct xt_osf_user_finger *f;
        struct xt_osf_finger *kf = NULL, *sf;

@@ -107,7 +108,8 @@ static int xt_osf_add_callback(struct net *net, struct sock *ctnl,
static int xt_osf_remove_callback(struct net *net, struct sock *ctnl,
                                  struct sk_buff *skb,
                                  const struct nlmsghdr *nlh,
                                  const struct nlattr * const osf_attrs[])
                                  const struct nlattr * const osf_attrs[],
                                  struct netlink_ext_ack *extack)
{
        struct xt_osf_user_finger *f;
        struct xt_osf_finger *sf;