netfilter: nf_conncount: Fix garbage collection with zones
Currently, we use check_hlist() for garbage collection. However, we
use the 'zone' from the counted entry to query the existence of
existing entries in the hlist. This could be wrong when they are in
different zones, and this patch fixes this issue.

Fixes: e59ea3df3f ("netfilter: xt_connlimit: honor conntrack zone if available")
Signed-off-by: Yi-Hung Wei <yihung.wei@gmail.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent fc6ddbecce
commit 21ba8847f8
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -20,7 +20,8 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
 				 bool *addit);
 
 bool nf_conncount_add(struct hlist_head *head,
-		      const struct nf_conntrack_tuple *tuple);
+		      const struct nf_conntrack_tuple *tuple,
+		      const struct nf_conntrack_zone *zone);
 
 void nf_conncount_cache_free(struct hlist_head *hhead);
 
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -46,6 +46,7 @@
 struct nf_conncount_tuple {
 	struct hlist_node		node;
 	struct nf_conntrack_tuple	tuple;
+	struct nf_conntrack_zone	zone;
 };
 
 struct nf_conncount_rb {
@@ -80,7 +81,8 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
 }
 
 bool nf_conncount_add(struct hlist_head *head,
-		      const struct nf_conntrack_tuple *tuple)
+		      const struct nf_conntrack_tuple *tuple,
+		      const struct nf_conntrack_zone *zone)
 {
 	struct nf_conncount_tuple *conn;
 
@@ -88,6 +90,7 @@ bool nf_conncount_add(struct hlist_head *head,
 	if (conn == NULL)
 		return false;
 	conn->tuple = *tuple;
+	conn->zone = *zone;
 	hlist_add_head(&conn->node, head);
 	return true;
 }
@@ -108,7 +111,7 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
 
 	/* check the saved connections */
 	hlist_for_each_entry_safe(conn, n, head, node) {
-		found = nf_conntrack_find_get(net, zone, &conn->tuple);
+		found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
 		if (found == NULL) {
 			hlist_del(&conn->node);
 			kmem_cache_free(conncount_conn_cachep, conn);
@@ -117,7 +120,8 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
 
 		found_ct = nf_ct_tuplehash_to_ctrack(found);
 
-		if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple)) {
+		if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
+		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
 			/*
 			 * Just to be sure we have it only once in the list.
 			 * We should not see tuples twice unless someone hooks
@@ -196,7 +200,7 @@ count_tree(struct net *net, struct rb_root *root,
 			if (!addit)
 				return count;
 
-			if (!nf_conncount_add(&rbconn->hhead, tuple))
+			if (!nf_conncount_add(&rbconn->hhead, tuple, zone))
 				return 0; /* hotdrop */
 
 			return count + 1;
@@ -238,6 +242,7 @@ count_tree(struct net *net, struct rb_root *root,
 	}
 
 	conn->tuple = *tuple;
+	conn->zone = *zone;
 	memcpy(rbconn->key, key, sizeof(u32) * keylen);
 
 	INIT_HLIST_HEAD(&rbconn->hhead);
--- a/net/netfilter/nft_connlimit.c
+++ b/net/netfilter/nft_connlimit.c
@@ -52,7 +52,7 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
 	if (!addit)
 		goto out;
 
-	if (!nf_conncount_add(&priv->hhead, tuple_ptr)) {
+	if (!nf_conncount_add(&priv->hhead, tuple_ptr, zone)) {
 		regs->verdict.code = NF_DROP;
 		spin_unlock_bh(&priv->lock);
 		return;
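
To make the commit message's point concrete, below is a minimal user-space sketch of the idea the patch implements: every counted connection remembers the zone it was seen in, and garbage collection looks each entry up with that stored zone instead of the zone of the packet currently being counted. The types and helpers here (struct conn_entry, ct_alive(), gc()) are invented for illustration and are not the kernel API.

/*
 * Minimal user-space sketch of the idea behind this patch -- NOT the
 * kernel API.  All types and helpers are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct tuple { unsigned int src, dst; };

struct conn_entry {
	struct tuple tuple;
	int zone;                /* after the patch: zone saved per entry */
	struct conn_entry *next;
};

/* Stand-in for nf_conntrack_find_get(): a connection only "exists"
 * if both tuple and zone match. */
static bool ct_alive(const struct tuple *t, int zone)
{
	/* pretend only (1 -> 2) in zone 1 is still alive */
	return t->src == 1 && t->dst == 2 && zone == 1;
}

/* Garbage-collect the list.  Before the patch the caller's zone was
 * used for every lookup; passing each entry's own zone is the fix. */
static int gc(struct conn_entry **head)
{
	struct conn_entry **pp = head;
	int kept = 0;

	while (*pp) {
		struct conn_entry *e = *pp;

		if (ct_alive(&e->tuple, e->zone)) {  /* e->zone, not caller's */
			kept++;
			pp = &e->next;
		} else {
			*pp = e->next;               /* drop the stale entry */
		}
	}
	return kept;
}

int main(void)
{
	struct conn_entry b = { { 1, 2 }, 2, NULL }; /* same tuple, zone 2: stale */
	struct conn_entry a = { { 1, 2 }, 1, &b };   /* zone 1: still alive */
	struct conn_entry *head = &a;

	printf("connections kept after GC: %d\n", gc(&head)); /* prints 1 */
	return 0;
}

Since one hlist can hold entries from several zones, the zone of the packet currently being evaluated cannot correctly validate all of them; storing the zone alongside each tuple, as the patch does, keeps the lookup self-describing.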