netfilter pull request 23-08-10
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEN9lkrMBJgcdVAPub1V2XiooUIOQFAmTUhbUACgkQ1V2XiooU
IORz1Q//a2fDuMsK5iW1BlF4y0P9aQUSVV//r3DYaoYOspJhsB2yZu4HtL+XQJvY
yncwg+ub24yQh5sUNSJnZztQVTN+NPY9Vl2TkXXMx6Wxs2XenmgzZmDdghUDzhTd
DuOjIGVEJ2M6XpPAOub89sqL+E0K7J0/q0aIcV0K0/xKo7U/z3vgLv4aZx/ZjPCV
daj3gcGpYQ1JJ9pi2se2yh89dzT321U7yYde9ek0TUeKGdCFJkfHkqMurwbcgoJ8
jkx5NOtrp+GLbhd+ME86IUtD+Edm46+bJUxvG0My99CVlak7y5gJh/aPxpAPACuW
NhWWy26kivVRWyttLQk0ScZfbO1CIwvaPzQC+QdlFdNA1eWTMhEk6AG2dVaU9CNB
V9WKWv59CPaDwPCKhXiPLQ9J+Kds7oyHPXGlV2dDOuSmJ9QbOh/HBQGEm/mI93qX
Fr+qqP3A9/juXZ5FdSLT2pJPuVlXdhQdgyHgiunyDPHoL9q7GFn5aQL/BVKE23tc
bgMez0GKzBR0waS9cycFSVls1rQN1XUIdoD6SLaRYq9FkKcCx+YGn3LH44Y1feL/
UnLMFlt9xIG4dPbGcGGy4r7mB53JpglHEqJEftvsNcBEd/r/f+4JP+/fa9FJ70uZ
GpGmv7Wo5DZT5V8LaMeWDWpJl6G7UcxrFOyDTw27l2OOVNaD2Ic=
=KNf7
-----END PGP SIGNATURE-----

Merge tag 'nf-23-08-10' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The existing attempt to resolve races between the control plane and GC
work is error prone, as reported by Bien Pham <phamnnb@sea.com>: some
places forgot to call nft_set_elem_mark_busy(), leading to
double-deactivation of elements.

This series contains the following patches:

1) Do not skip expired elements during walk, otherwise elements might
   never decrement the reference counter on data, leading to a memleak.

2) Add a GC transaction API to replace the former attempt to deal with
   races between the control plane and GC. The GC worker sets the
   NFT_SET_ELEM_DEAD_BIT on elements and creates a GC transaction to
   remove the expired elements; the GC transaction is aborted in case of
   interference with the control plane and retried later (async GC).
   Set backends such as rbtree and pipapo also perform GC from the
   control plane (sync GC); in that case, element deactivation and
   removal are safe because the mutex is held, and collected elements
   are released via call_rcu().

3) Adapt the existing set backends to use the GC transaction API.

4) Update the rhash set backend to set the _DEAD bit to report deleted
   elements from the datapath for GC.

5) Remove the old GC batch API and NFT_SET_ELEM_BUSY_BIT.

* tag 'nf-23-08-10' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf:
  netfilter: nf_tables: remove busy mark and gc batch API
  netfilter: nft_set_hash: mark set element as dead when deleting from packet path
  netfilter: nf_tables: adapt set backend to use GC transaction API
  netfilter: nf_tables: GC transaction API to avoid race with control plane
  netfilter: nf_tables: don't skip expired elements during walk
====================

Link: https://lore.kernel.org/r/20230810070830.24064-1-pablo@netfilter.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
commit 3e91b0ebd9
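To make the new API's calling convention concrete before diving into the diff, here is a small userspace model of the async GC batching pattern described above. The names mirror the kernel's nft_trans_gc_alloc()/nft_trans_gc_queue_async() helpers added by this series, but the types, the element source, and the queueing side are simplified stand-ins, not kernel code:

	#include <stdio.h>
	#include <stdlib.h>

	#define GC_BATCHCOUNT 256	/* mirrors NFT_TRANS_GC_BATCHCOUNT */

	/* Simplified stand-in for struct nft_trans_gc: one batch of dead elements. */
	struct gc_trans {
		unsigned int seq;	/* gc sequence sampled at alloc time */
		unsigned int count;
		void *priv[GC_BATCHCOUNT];
	};

	static struct gc_trans *gc_alloc(unsigned int gc_seq)
	{
		struct gc_trans *gc = calloc(1, sizeof(*gc));

		if (gc)
			gc->seq = gc_seq;
		return gc;
	}

	/* Model of nft_trans_gc_queue_async(): if the batch is full, hand it
	 * over to the worker and start a new one; here "queueing" is just a
	 * printf. The caller checks for NULL, which models allocation failure. */
	static struct gc_trans *gc_queue_async(struct gc_trans *gc, unsigned int gc_seq)
	{
		if (gc->count < GC_BATCHCOUNT)
			return gc;
		printf("queued batch of %u elements (seq %u)\n", gc->count, gc->seq);
		free(gc);
		return gc_alloc(gc_seq);
	}

	int main(void)
	{
		unsigned int gc_seq = 0;	/* models nft_net->gc_seq */
		struct gc_trans *gc = gc_alloc(gc_seq);
		int i;

		/* A backend walk marks expired elements dead, then batches them:
		 * make room first, then add, as the kernel call sites do. */
		for (i = 0; gc && i < 600; i++) {
			gc = gc_queue_async(gc, gc_seq);
			if (!gc)
				return 1;	/* allocation failed: try later */
			gc->priv[gc->count++] = NULL;	/* element would go here */
		}
		if (gc->count)
			printf("final batch of %u elements\n", gc->count);
		free(gc);
		return 0;
	}

The crucial property this models: a full batch is handed off before the next element is added, so nft_trans_gc_elem_add() can append unconditionally.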
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -512,6 +512,7 @@ struct nft_set_elem_expr {
  *
  *	@list: table set list node
  *	@bindings: list of set bindings
+ *	@refs: internal refcounting for async set destruction
  *	@table: table this set belongs to
  *	@net: netnamespace this set belongs to
  *	@name: name of the set
@@ -541,6 +542,7 @@ struct nft_set_elem_expr {
 struct nft_set {
 	struct list_head		list;
 	struct list_head		bindings;
+	refcount_t			refs;
 	struct nft_table		*table;
 	possible_net_t			net;
 	char				*name;
@@ -562,7 +564,8 @@ struct nft_set {
 	struct list_head		pending_update;
 	/* runtime data below here */
 	const struct nft_set_ops	*ops ____cacheline_aligned;
-	u16				flags:14,
+	u16				flags:13,
+					dead:1,
 					genmask:2;
 	u8				klen;
 	u8				dlen;
@@ -596,7 +599,6 @@ struct nft_set *nft_set_lookup_global(const struct net *net,
 
 struct nft_set_ext *nft_set_catchall_lookup(const struct net *net,
 					    const struct nft_set *set);
-void *nft_set_catchall_gc(const struct nft_set *set);
 
 static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
 {
@@ -813,62 +815,6 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
 void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
 				const struct nft_set *set, void *elem);
 
-/**
- * struct nft_set_gc_batch_head - nf_tables set garbage collection batch
- *
- * @rcu: rcu head
- * @set: set the elements belong to
- * @cnt: count of elements
- */
-struct nft_set_gc_batch_head {
-	struct rcu_head			rcu;
-	const struct nft_set		*set;
-	unsigned int			cnt;
-};
-
-#define NFT_SET_GC_BATCH_SIZE	((PAGE_SIZE -				  \
-				  sizeof(struct nft_set_gc_batch_head)) / \
-				  sizeof(void *))
-
-/**
- * struct nft_set_gc_batch - nf_tables set garbage collection batch
- *
- * @head: GC batch head
- * @elems: garbage collection elements
- */
-struct nft_set_gc_batch {
-	struct nft_set_gc_batch_head	head;
-	void				*elems[NFT_SET_GC_BATCH_SIZE];
-};
-
-struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
-						gfp_t gfp);
-void nft_set_gc_batch_release(struct rcu_head *rcu);
-
-static inline void nft_set_gc_batch_complete(struct nft_set_gc_batch *gcb)
-{
-	if (gcb != NULL)
-		call_rcu(&gcb->head.rcu, nft_set_gc_batch_release);
-}
-
-static inline struct nft_set_gc_batch *
-nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch *gcb,
-		       gfp_t gfp)
-{
-	if (gcb != NULL) {
-		if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems))
-			return gcb;
-		nft_set_gc_batch_complete(gcb);
-	}
-	return nft_set_gc_batch_alloc(set, gfp);
-}
-
-static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
-					void *elem)
-{
-	gcb->elems[gcb->head.cnt++] = elem;
-}
-
 struct nft_expr_ops;
 /**
  * struct nft_expr_type - nf_tables expression type
@@ -1557,39 +1503,30 @@ static inline void nft_set_elem_change_active(const struct net *net,
 
 #endif /* IS_ENABLED(CONFIG_NF_TABLES) */
 
-/*
- * We use a free bit in the genmask field to indicate the element
- * is busy, meaning it is currently being processed either by
- * the netlink API or GC.
- *
- * Even though the genmask is only a single byte wide, this works
- * because the extension structure if fully constant once initialized,
- * so there are no non-atomic write accesses unless it is already
- * marked busy.
- */
-#define NFT_SET_ELEM_BUSY_MASK	(1 << 2)
+#define NFT_SET_ELEM_DEAD_MASK	(1 << 2)
 
 #if defined(__LITTLE_ENDIAN_BITFIELD)
-#define NFT_SET_ELEM_BUSY_BIT	2
+#define NFT_SET_ELEM_DEAD_BIT	2
 #elif defined(__BIG_ENDIAN_BITFIELD)
-#define NFT_SET_ELEM_BUSY_BIT	(BITS_PER_LONG - BITS_PER_BYTE + 2)
+#define NFT_SET_ELEM_DEAD_BIT	(BITS_PER_LONG - BITS_PER_BYTE + 2)
 #else
 #error
 #endif
 
-static inline int nft_set_elem_mark_busy(struct nft_set_ext *ext)
+static inline void nft_set_elem_dead(struct nft_set_ext *ext)
 {
 	unsigned long *word = (unsigned long *)ext;
 
 	BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
-	return test_and_set_bit(NFT_SET_ELEM_BUSY_BIT, word);
+	set_bit(NFT_SET_ELEM_DEAD_BIT, word);
 }
 
-static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
+static inline int nft_set_elem_is_dead(const struct nft_set_ext *ext)
 {
 	unsigned long *word = (unsigned long *)ext;
 
-	clear_bit(NFT_SET_ELEM_BUSY_BIT, word);
+	BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
+	return test_bit(NFT_SET_ELEM_DEAD_BIT, word);
 }
 
 /**
@@ -1732,6 +1669,38 @@ struct nft_trans_flowtable {
 #define nft_trans_flowtable_flags(trans)	\
 	(((struct nft_trans_flowtable *)trans->data)->flags)
 
+#define NFT_TRANS_GC_BATCHCOUNT	256
+
+struct nft_trans_gc {
+	struct list_head	list;
+	struct net		*net;
+	struct nft_set		*set;
+	u32			seq;
+	u8			count;
+	void			*priv[NFT_TRANS_GC_BATCHCOUNT];
+	struct rcu_head		rcu;
+};
+
+struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
+					unsigned int gc_seq, gfp_t gfp);
+void nft_trans_gc_destroy(struct nft_trans_gc *trans);
+
+struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
+					      unsigned int gc_seq, gfp_t gfp);
+void nft_trans_gc_queue_async_done(struct nft_trans_gc *gc);
+
+struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp);
+void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans);
+
+void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv);
+
+struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+					   unsigned int gc_seq);
+
+void nft_setelem_data_deactivate(const struct net *net,
+				 const struct nft_set *set,
+				 struct nft_set_elem *elem);
+
 int __init nft_chain_filter_init(void);
 void nft_chain_filter_fini(void);
@@ -1758,6 +1727,7 @@ struct nftables_pernet {
 	struct mutex		commit_mutex;
 	u64			table_handle;
 	unsigned int		base_seq;
+	unsigned int		gc_seq;
 };
 
 extern unsigned int nf_tables_net_id;
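A note on the NFT_SET_ELEM_DEAD_BIT definitions above: the dead bit lives in the genmask byte at offset 0 of the element extension, while set_bit()/test_bit() operate on a whole unsigned long, so the big-endian build must translate "bit 2 of byte 0" into a word-relative bit index. The following standalone program (an illustration, not kernel code; it detects endianness at runtime instead of using the kernel's #ifdef) checks that both translations land in the same byte:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned long word = 0;
		unsigned char bytes[sizeof(word)];
		unsigned int one = 1;
		/* Little-endian iff the least significant byte comes first. */
		int little = *(unsigned char *)&one == 1;
		/* Bit 2 of byte 0: directly on little endian; on big endian
		 * byte 0 is the most significant byte of the word, hence
		 * BITS_PER_LONG - BITS_PER_BYTE + 2. */
		unsigned int bit = little ? 2 : (8 * sizeof(word)) - 8 + 2;

		word |= 1UL << bit;
		memcpy(bytes, &word, sizeof(word));
		printf("genmask byte after setting the DEAD bit: 0x%02x\n", bytes[0]);
		return 0;	/* prints 0x04 on either endianness */
	}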
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -31,7 +31,9 @@ static LIST_HEAD(nf_tables_expressions);
 static LIST_HEAD(nf_tables_objects);
 static LIST_HEAD(nf_tables_flowtables);
 static LIST_HEAD(nf_tables_destroy_list);
+static LIST_HEAD(nf_tables_gc_list);
 static DEFINE_SPINLOCK(nf_tables_destroy_list_lock);
+static DEFINE_SPINLOCK(nf_tables_gc_list_lock);
 
 enum {
 	NFT_VALIDATE_SKIP	= 0,
@@ -120,6 +122,9 @@ static void nft_validate_state_update(struct nft_table *table, u8 new_validate_s
 static void nf_tables_trans_destroy_work(struct work_struct *w);
 static DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work);
 
+static void nft_trans_gc_work(struct work_struct *work);
+static DECLARE_WORK(trans_gc_work, nft_trans_gc_work);
+
 static void nft_ctx_init(struct nft_ctx *ctx,
 			 struct net *net,
 			 const struct sk_buff *skb,
@@ -582,10 +587,6 @@ static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
 	return __nft_trans_set_add(ctx, msg_type, set, NULL);
 }
 
-static void nft_setelem_data_deactivate(const struct net *net,
-					const struct nft_set *set,
-					struct nft_set_elem *elem);
-
 static int nft_mapelem_deactivate(const struct nft_ctx *ctx,
 				  struct nft_set *set,
 				  const struct nft_set_iter *iter,
@@ -5055,6 +5056,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
 
 	INIT_LIST_HEAD(&set->bindings);
 	INIT_LIST_HEAD(&set->catchall_list);
+	refcount_set(&set->refs, 1);
 	set->table = table;
 	write_pnet(&set->net, net);
 	set->ops = ops;
@@ -5122,6 +5124,14 @@ static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
 	}
 }
 
+static void nft_set_put(struct nft_set *set)
+{
+	if (refcount_dec_and_test(&set->refs)) {
+		kfree(set->name);
+		kvfree(set);
+	}
+}
+
 static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
 {
 	int i;
@@ -5134,8 +5144,7 @@ static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
 
 	set->ops->destroy(ctx, set);
 	nft_set_catchall_destroy(ctx, set);
-	kfree(set->name);
-	kvfree(set);
+	nft_set_put(set);
 }
 
 static int nf_tables_delset(struct sk_buff *skb, const struct nfnl_info *info,
@@ -5602,8 +5611,12 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
 				  const struct nft_set_iter *iter,
 				  struct nft_set_elem *elem)
 {
+	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
 	struct nft_set_dump_args *args;
 
+	if (nft_set_elem_expired(ext))
+		return 0;
+
 	args = container_of(iter, struct nft_set_dump_args, iter);
 	return nf_tables_fill_setelem(args->skb, set, elem, args->reset);
 }
@@ -6274,7 +6287,8 @@ struct nft_set_ext *nft_set_catchall_lookup(const struct net *net,
 	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
 		ext = nft_set_elem_ext(set, catchall->elem);
 		if (nft_set_elem_active(ext, genmask) &&
-		    !nft_set_elem_expired(ext))
+		    !nft_set_elem_expired(ext) &&
+		    !nft_set_elem_is_dead(ext))
 			return ext;
 	}
 
@@ -6282,29 +6296,6 @@ struct nft_set_ext *nft_set_catchall_lookup(const struct net *net,
 }
 EXPORT_SYMBOL_GPL(nft_set_catchall_lookup);
 
-void *nft_set_catchall_gc(const struct nft_set *set)
-{
-	struct nft_set_elem_catchall *catchall, *next;
-	struct nft_set_ext *ext;
-	void *elem = NULL;
-
-	list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
-		ext = nft_set_elem_ext(set, catchall->elem);
-
-		if (!nft_set_elem_expired(ext) ||
-		    nft_set_elem_mark_busy(ext))
-			continue;
-
-		elem = catchall->elem;
-		list_del_rcu(&catchall->list);
-		kfree_rcu(catchall, rcu);
-		break;
-	}
-
-	return elem;
-}
-EXPORT_SYMBOL_GPL(nft_set_catchall_gc);
-
 static int nft_setelem_catchall_insert(const struct net *net,
 				       struct nft_set *set,
 				       const struct nft_set_elem *elem,
@@ -6366,7 +6357,6 @@ static void nft_setelem_activate(struct net *net, struct nft_set *set,
 
 	if (nft_setelem_is_catchall(set, elem)) {
 		nft_set_elem_change_active(net, set, ext);
-		nft_set_elem_clear_busy(ext);
 	} else {
 		set->ops->activate(net, set, elem);
 	}
@@ -6381,8 +6371,7 @@ static int nft_setelem_catchall_deactivate(const struct net *net,
 
 	list_for_each_entry(catchall, &set->catchall_list, list) {
 		ext = nft_set_elem_ext(set, catchall->elem);
-		if (!nft_is_active(net, ext) ||
-		    nft_set_elem_mark_busy(ext))
+		if (!nft_is_active(net, ext))
 			continue;
 
 		kfree(elem->priv);
@@ -6777,7 +6766,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
 		goto err_elem_free;
 	}
 
-	ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
+	ext->genmask = nft_genmask_cur(ctx->net);
 
 	err = nft_setelem_insert(ctx->net, set, &elem, &ext2, flags);
 	if (err) {
@@ -6929,9 +6918,9 @@ static void nft_setelem_data_activate(const struct net *net,
 		nft_use_inc_restore(&(*nft_set_ext_obj(ext))->use);
 }
 
-static void nft_setelem_data_deactivate(const struct net *net,
-					const struct nft_set *set,
-					struct nft_set_elem *elem)
+void nft_setelem_data_deactivate(const struct net *net,
+				 const struct nft_set *set,
+				 struct nft_set_elem *elem)
 {
 	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
 
@@ -7095,8 +7084,7 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx,
 
 	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
 		ext = nft_set_elem_ext(set, catchall->elem);
-		if (!nft_set_elem_active(ext, genmask) ||
-		    nft_set_elem_mark_busy(ext))
+		if (!nft_set_elem_active(ext, genmask))
 			continue;
 
 		elem.priv = catchall->elem;
@@ -7170,29 +7158,6 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
 	return err;
 }
 
-void nft_set_gc_batch_release(struct rcu_head *rcu)
-{
-	struct nft_set_gc_batch *gcb;
-	unsigned int i;
-
-	gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu);
-	for (i = 0; i < gcb->head.cnt; i++)
-		nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true);
-	kfree(gcb);
-}
-
-struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
-						gfp_t gfp)
-{
-	struct nft_set_gc_batch *gcb;
-
-	gcb = kzalloc(sizeof(*gcb), gfp);
-	if (gcb == NULL)
-		return gcb;
-	gcb->head.set = set;
-	return gcb;
-}
-
 /*
  * Stateful objects
  */
@@ -9414,6 +9379,207 @@ void nft_chain_del(struct nft_chain *chain)
 	list_del_rcu(&chain->list);
 }
 
+static void nft_trans_gc_setelem_remove(struct nft_ctx *ctx,
+					struct nft_trans_gc *trans)
+{
+	void **priv = trans->priv;
+	unsigned int i;
+
+	for (i = 0; i < trans->count; i++) {
+		struct nft_set_elem elem = {
+			.priv = priv[i],
+		};
+
+		nft_setelem_data_deactivate(ctx->net, trans->set, &elem);
+		nft_setelem_remove(ctx->net, trans->set, &elem);
+	}
+}
+
+void nft_trans_gc_destroy(struct nft_trans_gc *trans)
+{
+	nft_set_put(trans->set);
+	put_net(trans->net);
+	kfree(trans);
+}
+
+static void nft_trans_gc_trans_free(struct rcu_head *rcu)
+{
+	struct nft_set_elem elem = {};
+	struct nft_trans_gc *trans;
+	struct nft_ctx ctx = {};
+	unsigned int i;
+
+	trans = container_of(rcu, struct nft_trans_gc, rcu);
+	ctx.net = read_pnet(&trans->set->net);
+
+	for (i = 0; i < trans->count; i++) {
+		elem.priv = trans->priv[i];
+		if (!nft_setelem_is_catchall(trans->set, &elem))
+			atomic_dec(&trans->set->nelems);
+
+		nf_tables_set_elem_destroy(&ctx, trans->set, elem.priv);
+	}
+
+	nft_trans_gc_destroy(trans);
+}
+
+static bool nft_trans_gc_work_done(struct nft_trans_gc *trans)
+{
+	struct nftables_pernet *nft_net;
+	struct nft_ctx ctx = {};
+
+	nft_net = nft_pernet(trans->net);
+
+	mutex_lock(&nft_net->commit_mutex);
+
+	/* Check for race with transaction, otherwise this batch refers to
+	 * stale objects that might not be there anymore. Skip transaction if
+	 * set has been destroyed from control plane transaction in case gc
+	 * worker loses race.
+	 */
+	if (READ_ONCE(nft_net->gc_seq) != trans->seq || trans->set->dead) {
+		mutex_unlock(&nft_net->commit_mutex);
+		return false;
+	}
+
+	ctx.net = trans->net;
+	ctx.table = trans->set->table;
+
+	nft_trans_gc_setelem_remove(&ctx, trans);
+	mutex_unlock(&nft_net->commit_mutex);
+
+	return true;
+}
+
+static void nft_trans_gc_work(struct work_struct *work)
+{
+	struct nft_trans_gc *trans, *next;
+	LIST_HEAD(trans_gc_list);
+
+	spin_lock(&nf_tables_destroy_list_lock);
+	list_splice_init(&nf_tables_gc_list, &trans_gc_list);
+	spin_unlock(&nf_tables_destroy_list_lock);
+
+	list_for_each_entry_safe(trans, next, &trans_gc_list, list) {
+		list_del(&trans->list);
+		if (!nft_trans_gc_work_done(trans)) {
+			nft_trans_gc_destroy(trans);
+			continue;
+		}
+		call_rcu(&trans->rcu, nft_trans_gc_trans_free);
+	}
+}
+
+struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
+					unsigned int gc_seq, gfp_t gfp)
+{
+	struct net *net = read_pnet(&set->net);
+	struct nft_trans_gc *trans;
+
+	trans = kzalloc(sizeof(*trans), gfp);
+	if (!trans)
+		return NULL;
+
+	refcount_inc(&set->refs);
+	trans->set = set;
+	trans->net = get_net(net);
+	trans->seq = gc_seq;
+
+	return trans;
+}
+
+void nft_trans_gc_elem_add(struct nft_trans_gc *trans, void *priv)
+{
+	trans->priv[trans->count++] = priv;
+}
+
+static void nft_trans_gc_queue_work(struct nft_trans_gc *trans)
+{
+	spin_lock(&nf_tables_gc_list_lock);
+	list_add_tail(&trans->list, &nf_tables_gc_list);
+	spin_unlock(&nf_tables_gc_list_lock);
+
+	schedule_work(&trans_gc_work);
+}
+
+static int nft_trans_gc_space(struct nft_trans_gc *trans)
+{
+	return NFT_TRANS_GC_BATCHCOUNT - trans->count;
+}
+
+struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
+					      unsigned int gc_seq, gfp_t gfp)
+{
+	if (nft_trans_gc_space(gc))
+		return gc;
+
+	nft_trans_gc_queue_work(gc);
+
+	return nft_trans_gc_alloc(gc->set, gc_seq, gfp);
+}
+
+void nft_trans_gc_queue_async_done(struct nft_trans_gc *trans)
+{
+	if (trans->count == 0) {
+		nft_trans_gc_destroy(trans);
+		return;
+	}
+
+	nft_trans_gc_queue_work(trans);
+}
+
+struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp)
+{
+	if (WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net)))
+		return NULL;
+
+	if (nft_trans_gc_space(gc))
+		return gc;
+
+	call_rcu(&gc->rcu, nft_trans_gc_trans_free);
+
+	return nft_trans_gc_alloc(gc->set, 0, gfp);
+}
+
+void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
+{
+	WARN_ON_ONCE(!lockdep_commit_lock_is_held(trans->net));
+
+	if (trans->count == 0) {
+		nft_trans_gc_destroy(trans);
+		return;
+	}
+
+	call_rcu(&trans->rcu, nft_trans_gc_trans_free);
+}
+
+struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+					   unsigned int gc_seq)
+{
+	struct nft_set_elem_catchall *catchall;
+	const struct nft_set *set = gc->set;
+	struct nft_set_ext *ext;
+
+	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+		ext = nft_set_elem_ext(set, catchall->elem);
+
+		if (!nft_set_elem_expired(ext))
+			continue;
+		if (nft_set_elem_is_dead(ext))
+			goto dead_elem;
+
+		nft_set_elem_dead(ext);
+dead_elem:
+		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+		if (!gc)
+			return NULL;
+
+		nft_trans_gc_elem_add(gc, catchall->elem);
+	}
+
+	return gc;
+}
+
 static void nf_tables_module_autoload_cleanup(struct net *net)
 {
 	struct nftables_pernet *nft_net = nft_pernet(net);
@@ -9576,11 +9742,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 {
 	struct nftables_pernet *nft_net = nft_pernet(net);
 	struct nft_trans *trans, *next;
+	unsigned int base_seq, gc_seq;
 	LIST_HEAD(set_update_list);
 	struct nft_trans_elem *te;
 	struct nft_chain *chain;
 	struct nft_table *table;
-	unsigned int base_seq;
 	LIST_HEAD(adl);
 	int err;
 
@@ -9657,6 +9823,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 
 	WRITE_ONCE(nft_net->base_seq, base_seq);
 
+	/* Bump gc counter, it becomes odd, this is the busy mark. */
+	gc_seq = READ_ONCE(nft_net->gc_seq);
+	WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
+
 	/* step 3. Start new generation, rules_gen_X now in use. */
 	net->nft.gencursor = nft_gencursor_next(net);
 
@@ -9764,6 +9934,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 			break;
 		case NFT_MSG_DELSET:
 		case NFT_MSG_DESTROYSET:
+			nft_trans_set(trans)->dead = 1;
 			list_del_rcu(&nft_trans_set(trans)->list);
 			nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
 					     trans->msg_type, GFP_KERNEL);
@@ -9866,6 +10037,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 	nft_commit_notify(net, NETLINK_CB(skb).portid);
 	nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
 	nf_tables_commit_audit_log(&adl, nft_net->base_seq);
+
+	WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
 	nf_tables_commit_release(net);
 
 	return 0;
@@ -10915,6 +11088,7 @@ static int __net_init nf_tables_init_net(struct net *net)
 	INIT_LIST_HEAD(&nft_net->notify_list);
 	mutex_init(&nft_net->commit_mutex);
 	nft_net->base_seq = 1;
+	nft_net->gc_seq = 0;
 
 	return 0;
 }
@@ -10943,10 +11117,16 @@ static void __net_exit nf_tables_exit_net(struct net *net)
 	WARN_ON_ONCE(!list_empty(&nft_net->notify_list));
 }
 
+static void nf_tables_exit_batch(struct list_head *net_exit_list)
+{
+	flush_work(&trans_gc_work);
+}
+
 static struct pernet_operations nf_tables_net_ops = {
 	.init		= nf_tables_init_net,
 	.pre_exit	= nf_tables_pre_exit_net,
 	.exit		= nf_tables_exit_net,
+	.exit_batch	= nf_tables_exit_batch,
 	.id		= &nf_tables_net_id,
 	.size		= sizeof(struct nftables_pernet),
 };
@@ -11018,6 +11198,7 @@ static void __exit nf_tables_module_exit(void)
 	nft_chain_filter_fini();
 	nft_chain_route_fini();
 	unregister_pernet_subsys(&nf_tables_net_ops);
+	cancel_work_sync(&trans_gc_work);
 	cancel_work_sync(&trans_destroy_work);
 	rcu_barrier();
 	rhltable_destroy(&nft_objname_ht);
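The nf_tables_commit() changes above implement a simple sequence-counter handshake with the GC workers. Below is a toy single-threaded model of it; commit_begin()/commit_end() and gc_batch_still_valid() are hypothetical stand-ins for the commit path and for the check in nft_trans_gc_work_done(), not kernel functions:

	#include <stdio.h>

	/* The commit path bumps the counter once when it starts (it becomes
	 * odd, the "busy" mark) and once more when it finishes (even again).
	 * A GC worker samples the counter when it starts collecting and
	 * compares it before acting on the batch; any difference means a
	 * transaction ran in between, so the batch is stale and must be
	 * dropped and collection retried later. */
	static unsigned int gc_seq;

	static void commit_begin(void) { gc_seq++; }	/* odd: commit in flight */
	static void commit_end(void)   { gc_seq++; }	/* even: commit finished */

	static int gc_batch_still_valid(unsigned int sampled)
	{
		return gc_seq == sampled;
	}

	int main(void)
	{
		unsigned int sampled = gc_seq;	/* worker starts collecting */

		printf("no commit ran:    valid=%d\n", gc_batch_still_valid(sampled));
		commit_begin();
		printf("commit in flight: valid=%d\n", gc_batch_still_valid(sampled));
		commit_end();
		printf("commit finished:  valid=%d\n", gc_batch_still_valid(sampled));
		return 0;
	}

Double-increment means a worker can never observe the same value across a completed transaction, which is exactly what the set backends below test with READ_ONCE(nft_net->gc_seq) != gc_seq.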
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -59,6 +59,8 @@ static inline int nft_rhash_cmp(struct rhashtable_compare_arg *arg,
 
 	if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
 		return 1;
+	if (nft_set_elem_is_dead(&he->ext))
+		return 1;
 	if (nft_set_elem_expired(&he->ext))
 		return 1;
 	if (!nft_set_elem_active(&he->ext, x->genmask))
@@ -188,7 +190,6 @@ static void nft_rhash_activate(const struct net *net, const struct nft_set *set,
 	struct nft_rhash_elem *he = elem->priv;
 
 	nft_set_elem_change_active(net, set, &he->ext);
-	nft_set_elem_clear_busy(&he->ext);
 }
 
 static bool nft_rhash_flush(const struct net *net,
@@ -196,12 +197,9 @@ static bool nft_rhash_flush(const struct net *net,
 {
 	struct nft_rhash_elem *he = priv;
 
-	if (!nft_set_elem_mark_busy(&he->ext) ||
-	    !nft_is_active(net, &he->ext)) {
-		nft_set_elem_change_active(net, set, &he->ext);
-		return true;
-	}
-	return false;
+	nft_set_elem_change_active(net, set, &he->ext);
+
+	return true;
 }
 
 static void *nft_rhash_deactivate(const struct net *net,
@@ -218,9 +216,8 @@ static void *nft_rhash_deactivate(const struct net *net,
 
 	rcu_read_lock();
 	he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
-	if (he != NULL &&
-	    !nft_rhash_flush(net, set, he))
-		he = NULL;
+	if (he)
+		nft_set_elem_change_active(net, set, &he->ext);
 
 	rcu_read_unlock();
 
@@ -252,7 +249,9 @@ static bool nft_rhash_delete(const struct nft_set *set,
 	if (he == NULL)
 		return false;
 
-	return rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params) == 0;
+	nft_set_elem_dead(&he->ext);
+
+	return true;
 }
 
 static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
@@ -278,8 +277,6 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
 
 		if (iter->count < iter->skip)
 			goto cont;
-		if (nft_set_elem_expired(&he->ext))
-			goto cont;
 		if (!nft_set_elem_active(&he->ext, iter->genmask))
 			goto cont;
 
@@ -314,25 +311,48 @@ static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set,
 
 static void nft_rhash_gc(struct work_struct *work)
 {
+	struct nftables_pernet *nft_net;
 	struct nft_set *set;
 	struct nft_rhash_elem *he;
 	struct nft_rhash *priv;
-	struct nft_set_gc_batch *gcb = NULL;
 	struct rhashtable_iter hti;
+	struct nft_trans_gc *gc;
+	struct net *net;
+	u32 gc_seq;
 
 	priv = container_of(work, struct nft_rhash, gc_work.work);
 	set  = nft_set_container_of(priv);
+	net  = read_pnet(&set->net);
+	nft_net = nft_pernet(net);
+	gc_seq  = READ_ONCE(nft_net->gc_seq);
+
+	gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
+	if (!gc)
+		goto done;
 
 	rhashtable_walk_enter(&priv->ht, &hti);
 	rhashtable_walk_start(&hti);
 
 	while ((he = rhashtable_walk_next(&hti))) {
 		if (IS_ERR(he)) {
-			if (PTR_ERR(he) != -EAGAIN)
-				break;
+			if (PTR_ERR(he) != -EAGAIN) {
+				nft_trans_gc_destroy(gc);
+				gc = NULL;
+				goto try_later;
+			}
 			continue;
 		}
+
+		/* Ruleset has been updated, try later. */
+		if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
+			nft_trans_gc_destroy(gc);
+			gc = NULL;
+			goto try_later;
+		}
+
+		if (nft_set_elem_is_dead(&he->ext))
+			goto dead_elem;
+
 		if (nft_set_ext_exists(&he->ext, NFT_SET_EXT_EXPRESSIONS) &&
 		    nft_rhash_expr_needs_gc_run(set, &he->ext))
 			goto needs_gc_run;
@@ -340,26 +360,26 @@ static void nft_rhash_gc(struct work_struct *work)
 		if (!nft_set_elem_expired(&he->ext))
 			continue;
 needs_gc_run:
-		if (nft_set_elem_mark_busy(&he->ext))
-			continue;
+		nft_set_elem_dead(&he->ext);
+dead_elem:
+		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+		if (!gc)
+			goto try_later;
 
-		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
-		if (gcb == NULL)
-			break;
-		rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params);
-		atomic_dec(&set->nelems);
-		nft_set_gc_batch_add(gcb, he);
+		nft_trans_gc_elem_add(gc, he);
 	}
+
+	gc = nft_trans_gc_catchall(gc, gc_seq);
+
+try_later:
+	/* catchall list iteration requires rcu read side lock. */
 	rhashtable_walk_stop(&hti);
 	rhashtable_walk_exit(&hti);
 
-	he = nft_set_catchall_gc(set);
-	if (he) {
-		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
-		if (gcb)
-			nft_set_gc_batch_add(gcb, he);
-	}
-	nft_set_gc_batch_complete(gcb);
+	if (gc)
+		nft_trans_gc_queue_async_done(gc);
+
+done:
 	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
 			   nft_set_gc_interval(set));
 }
@@ -394,7 +414,7 @@ static int nft_rhash_init(const struct nft_set *set,
 		return err;
 
 	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rhash_gc);
-	if (set->flags & NFT_SET_TIMEOUT)
+	if (set->flags & (NFT_SET_TIMEOUT | NFT_SET_EVAL))
 		nft_rhash_gc_init(set);
 
 	return 0;
@@ -422,7 +442,6 @@ static void nft_rhash_destroy(const struct nft_ctx *ctx,
 	};
 
 	cancel_delayed_work_sync(&priv->gc_work);
-	rcu_barrier();
 	rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
 				    (void *)&rhash_ctx);
 }
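The nft_rhash_delete() change above turns datapath deletion into a two-phase operation: the packet path only marks the element dead, nft_rhash_cmp() hides dead elements from subsequent lookups, and the async GC transaction removes and frees them later. A minimal userspace model of that pattern, with illustrative types that are not the kernel's:

	#include <stdbool.h>
	#include <stdio.h>

	struct elem {
		bool dead;
		int key;
	};

	/* Mirrors the added nft_rhash_cmp() check: a dead element compares
	 * as a non-match, so it is invisible to lookups even though it is
	 * still physically present in the table. */
	static bool elem_matches(const struct elem *e, int key)
	{
		return !e->dead && e->key == key;
	}

	/* Mirrors the new nft_rhash_delete(): no removal from the packet
	 * path, just the dead mark; the GC worker reaps the element later. */
	static void datapath_delete(struct elem *e)
	{
		e->dead = true;
	}

	int main(void)
	{
		struct elem e = { .dead = false, .key = 42 };

		printf("before delete: match=%d\n", elem_matches(&e, 42));
		datapath_delete(&e);
		printf("after delete:  match=%d\n", elem_matches(&e, 42));
		return 0;
	}

Deferring the physical removal to the GC transaction is what lets the commit-mutex/gc_seq checks serialize against the control plane instead of racing with it.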
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -566,8 +566,7 @@ next_match:
 		goto out;
 
 	if (last) {
-		if (nft_set_elem_expired(&f->mt[b].e->ext) ||
-		    (genmask &&
+		if ((genmask &&
 		     !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
 			goto next_match;
 
@@ -601,8 +600,17 @@ out:
 static void *nft_pipapo_get(const struct net *net, const struct nft_set *set,
 			    const struct nft_set_elem *elem, unsigned int flags)
 {
-	return pipapo_get(net, set, (const u8 *)elem->key.val.data,
-			  nft_genmask_cur(net));
+	struct nft_pipapo_elem *ret;
+
+	ret = pipapo_get(net, set, (const u8 *)elem->key.val.data,
+			 nft_genmask_cur(net));
+	if (IS_ERR(ret))
+		return ret;
+
+	if (nft_set_elem_expired(&ret->ext))
+		return ERR_PTR(-ENOENT);
+
+	return ret;
 }
 
 /**
@@ -1528,16 +1536,34 @@ static void pipapo_drop(struct nft_pipapo_match *m,
 	}
 }
 
+static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set,
+				     struct nft_pipapo_elem *e)
+
+{
+	struct nft_set_elem elem = {
+		.priv	= e,
+	};
+
+	nft_setelem_data_deactivate(net, set, &elem);
+}
+
 /**
  * pipapo_gc() - Drop expired entries from set, destroy start and end elements
  * @set: nftables API set representation
  * @m: Matching data
  */
-static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m)
+static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m)
 {
+	struct nft_set *set = (struct nft_set *) _set;
 	struct nft_pipapo *priv = nft_set_priv(set);
+	struct net *net = read_pnet(&set->net);
 	int rules_f0, first_rule = 0;
 	struct nft_pipapo_elem *e;
+	struct nft_trans_gc *gc;
+
+	gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
+	if (!gc)
+		return;
 
 	while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
 		union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
@@ -1561,13 +1587,20 @@ static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m)
 			f--;
 			i--;
 			e = f->mt[rulemap[i].to].e;
-			if (nft_set_elem_expired(&e->ext) &&
-			    !nft_set_elem_mark_busy(&e->ext)) {
+
+			/* synchronous gc never fails, there is no need to set on
+			 * NFT_SET_ELEM_DEAD_BIT.
+			 */
+			if (nft_set_elem_expired(&e->ext)) {
 				priv->dirty = true;
-				pipapo_drop(m, rulemap);
 
-				rcu_barrier();
-				nft_set_elem_destroy(set, e, true);
+				gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+				if (!gc)
+					break;
+
+				nft_pipapo_gc_deactivate(net, set, e);
+				pipapo_drop(m, rulemap);
+				nft_trans_gc_elem_add(gc, e);
 
 				/* And check again current first rule, which is now the
 				 * first we haven't checked.
@@ -1577,11 +1610,11 @@ static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m)
 		}
 	}
 
-	e = nft_set_catchall_gc(set);
-	if (e)
-		nft_set_elem_destroy(set, e, true);
-
-	priv->last_gc = jiffies;
+	gc = nft_trans_gc_catchall(gc, 0);
+	if (gc) {
+		nft_trans_gc_queue_sync_done(gc);
+		priv->last_gc = jiffies;
+	}
 }
 
 /**
@@ -1706,7 +1739,6 @@ static void nft_pipapo_activate(const struct net *net,
 		return;
 
 	nft_set_elem_change_active(net, set, &e->ext);
-	nft_set_elem_clear_busy(&e->ext);
 }
 
 /**
@@ -2005,8 +2037,6 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
 			goto cont;
 
 		e = f->mt[r].e;
-		if (nft_set_elem_expired(&e->ext))
-			goto cont;
 
 		elem.priv = e;
 
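Both synchronous GC call sites, pipapo_gc() above and nft_rbtree_gc_elem() below, rely on an invariant spelled out in the rbtree comment: a freshly allocated batch has NFT_TRANS_GC_BATCHCOUNT slots and receives at most a couple of elements before each nft_trans_gc_queue_sync() call, so the "queue if full" step never has to allocate and cannot fail. A toy check of that invariant, with illustrative types only:

	#include <assert.h>
	#include <stdio.h>

	#define BATCHCOUNT 256	/* mirrors NFT_TRANS_GC_BATCHCOUNT */

	/* Stand-in for a trans gc batch; only the fill level matters here. */
	struct batch { unsigned int count; };

	/* Model of nft_trans_gc_queue_sync(): with space left it returns the
	 * same batch untouched, so no allocation happens and no failure path
	 * is reachable. The assert encodes the "always room" invariant. */
	static struct batch *queue_sync(struct batch *gc)
	{
		assert(gc->count < BATCHCOUNT);
		return gc;
	}

	int main(void)
	{
		struct batch b = { .count = 0 };	/* freshly allocated batch */
		struct batch *gc = &b;

		gc = queue_sync(gc);
		gc->count++;	/* the preceding end interval (rbe_prev) */
		gc = queue_sync(gc);
		gc->count++;	/* the expired element itself (rbe) */

		printf("batch uses %u of %u slots\n", gc->count, BATCHCOUNT);
		return 0;
	}

This is why the rbtree code can WARN_ON_ONCE(!gc) rather than handle a real failure, and why sync GC does not need the NFT_SET_ELEM_DEAD_BIT at all.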
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -46,6 +46,12 @@ static int nft_rbtree_cmp(const struct nft_set *set,
 			  set->klen);
 }
 
+static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
+{
+	return nft_set_elem_expired(&rbe->ext) ||
+	       nft_set_elem_is_dead(&rbe->ext);
+}
+
 static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
 				const u32 *key, const struct nft_set_ext **ext,
 				unsigned int seq)
@@ -80,7 +86,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
 			continue;
 		}
 
-		if (nft_set_elem_expired(&rbe->ext))
+		if (nft_rbtree_elem_expired(rbe))
 			return false;
 
 		if (nft_rbtree_interval_end(rbe)) {
@@ -98,7 +104,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
 
 	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
 	    nft_set_elem_active(&interval->ext, genmask) &&
-	    !nft_set_elem_expired(&interval->ext) &&
+	    !nft_rbtree_elem_expired(interval) &&
 	    nft_rbtree_interval_start(interval)) {
 		*ext = &interval->ext;
 		return true;
@@ -215,6 +221,18 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
 	return rbe;
 }
 
+static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
+				 struct nft_rbtree *priv,
+				 struct nft_rbtree_elem *rbe)
+{
+	struct nft_set_elem elem = {
+		.priv	= rbe,
+	};
+
+	nft_setelem_data_deactivate(net, set, &elem);
+	rb_erase(&rbe->node, &priv->root);
+}
+
 static int nft_rbtree_gc_elem(const struct nft_set *__set,
 			      struct nft_rbtree *priv,
 			      struct nft_rbtree_elem *rbe,
@@ -222,11 +240,12 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
 {
 	struct nft_set *set = (struct nft_set *)__set;
 	struct rb_node *prev = rb_prev(&rbe->node);
+	struct net *net = read_pnet(&set->net);
 	struct nft_rbtree_elem *rbe_prev;
-	struct nft_set_gc_batch *gcb;
+	struct nft_trans_gc *gc;
 
-	gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
-	if (!gcb)
+	gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
+	if (!gc)
 		return -ENOMEM;
 
 	/* search for end interval coming before this element.
@@ -244,17 +263,28 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
 
 	if (prev) {
 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+		nft_rbtree_gc_remove(net, set, priv, rbe_prev);
 
-		rb_erase(&rbe_prev->node, &priv->root);
-		atomic_dec(&set->nelems);
-		nft_set_gc_batch_add(gcb, rbe_prev);
+		/* There is always room in this trans gc for this element,
+		 * memory allocation never actually happens, hence, the warning
+		 * splat in such case. No need to set NFT_SET_ELEM_DEAD_BIT,
+		 * this is synchronous gc which never fails.
+		 */
+		gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+		if (WARN_ON_ONCE(!gc))
+			return -ENOMEM;
+
+		nft_trans_gc_elem_add(gc, rbe_prev);
 	}
 
-	rb_erase(&rbe->node, &priv->root);
-	atomic_dec(&set->nelems);
+	nft_rbtree_gc_remove(net, set, priv, rbe);
+	gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+	if (WARN_ON_ONCE(!gc))
+		return -ENOMEM;
 
-	nft_set_gc_batch_add(gcb, rbe);
-	nft_set_gc_batch_complete(gcb);
+	nft_trans_gc_elem_add(gc, rbe);
+
+	nft_trans_gc_queue_sync_done(gc);
 
 	return 0;
 }
@@ -482,7 +512,6 @@ static void nft_rbtree_activate(const struct net *net,
 	struct nft_rbtree_elem *rbe = elem->priv;
 
 	nft_set_elem_change_active(net, set, &rbe->ext);
-	nft_set_elem_clear_busy(&rbe->ext);
 }
 
 static bool nft_rbtree_flush(const struct net *net,
@@ -490,12 +519,9 @@ static bool nft_rbtree_flush(const struct net *net,
 {
 	struct nft_rbtree_elem *rbe = priv;
 
-	if (!nft_set_elem_mark_busy(&rbe->ext) ||
-	    !nft_is_active(net, &rbe->ext)) {
-		nft_set_elem_change_active(net, set, &rbe->ext);
-		return true;
-	}
-	return false;
+	nft_set_elem_change_active(net, set, &rbe->ext);
+
+	return true;
 }
 
 static void *nft_rbtree_deactivate(const struct net *net,
@@ -552,8 +578,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 
 		if (iter->count < iter->skip)
 			goto cont;
-		if (nft_set_elem_expired(&rbe->ext))
-			goto cont;
 		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
 			goto cont;
 
@@ -572,26 +596,40 @@ cont:
 
 static void nft_rbtree_gc(struct work_struct *work)
 {
-	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
-	struct nft_set_gc_batch *gcb = NULL;
+	struct nft_rbtree_elem *rbe, *rbe_end = NULL;
+	struct nftables_pernet *nft_net;
 	struct nft_rbtree *priv;
+	struct nft_trans_gc *gc;
 	struct rb_node *node;
 	struct nft_set *set;
+	unsigned int gc_seq;
 	struct net *net;
 	u8 genmask;
 
 	priv = container_of(work, struct nft_rbtree, gc_work.work);
 	set  = nft_set_container_of(priv);
 	net  = read_pnet(&set->net);
 	genmask = nft_genmask_cur(net);
+	nft_net = nft_pernet(net);
+	gc_seq  = READ_ONCE(nft_net->gc_seq);
+
+	gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
+	if (!gc)
+		goto done;
 
 	write_lock_bh(&priv->lock);
 	write_seqcount_begin(&priv->count);
 	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+
+		/* Ruleset has been updated, try later. */
+		if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
+			nft_trans_gc_destroy(gc);
+			gc = NULL;
+			goto try_later;
+		}
+
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
 		if (!nft_set_elem_active(&rbe->ext, genmask))
 			continue;
+		if (nft_set_elem_is_dead(&rbe->ext))
+			goto dead_elem;
 
 		/* elements are reversed in the rbtree for historical reasons,
 		 * from highest to lowest value, that is why end element is
@@ -604,46 +642,36 @@ static void nft_rbtree_gc(struct work_struct *work)
 		if (!nft_set_elem_expired(&rbe->ext))
 			continue;
 
-		if (nft_set_elem_mark_busy(&rbe->ext)) {
-			rbe_end = NULL;
+		nft_set_elem_dead(&rbe->ext);
+
+		if (!rbe_end)
 			continue;
-		}
 
-		if (rbe_prev) {
-			rb_erase(&rbe_prev->node, &priv->root);
-			rbe_prev = NULL;
-		}
-		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
-		if (!gcb)
-			break;
+		nft_set_elem_dead(&rbe_end->ext);
 
-		atomic_dec(&set->nelems);
-		nft_set_gc_batch_add(gcb, rbe);
-		rbe_prev = rbe;
+		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+		if (!gc)
+			goto try_later;
 
-		if (rbe_end) {
-			atomic_dec(&set->nelems);
-			nft_set_gc_batch_add(gcb, rbe_end);
-			rb_erase(&rbe_end->node, &priv->root);
-			rbe_end = NULL;
-		}
-		node = rb_next(node);
-		if (!node)
-			break;
+		nft_trans_gc_elem_add(gc, rbe_end);
+		rbe_end = NULL;
+dead_elem:
+		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+		if (!gc)
+			goto try_later;
+
+		nft_trans_gc_elem_add(gc, rbe);
 	}
-	if (rbe_prev)
-		rb_erase(&rbe_prev->node, &priv->root);
+
+	gc = nft_trans_gc_catchall(gc, gc_seq);
+
+try_later:
 	write_seqcount_end(&priv->count);
 	write_unlock_bh(&priv->lock);
 
-	rbe = nft_set_catchall_gc(set);
-	if (rbe) {
-		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
-		if (gcb)
-			nft_set_gc_batch_add(gcb, rbe);
-	}
-	nft_set_gc_batch_complete(gcb);
+	if (gc)
+		nft_trans_gc_queue_async_done(gc);
+done:
 	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
 			   nft_set_gc_interval(set));
 }