netfilter: conntrack: use get_random_once for conntrack hash seed
As an earlier commit removed accesses to the hash from other files, we can
also make it static.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
commit 141658fb02 (parent 7001c6d109)
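For context, get_random_once() fills a buffer with random bytes the first time control reaches it and is effectively free on every later call (in this era it is a DO_ONCE()-based wrapper around get_random_bytes()), which is why the hand-rolled lazy seeding removed below becomes unnecessary. The sketch that follows is a userspace analogue of that "seed on first use" idea, not the kernel implementation: all identifiers (demo_seed, demo_hash, demo_seed_init) are invented, and rand() merely stands in for get_random_bytes(). Build with -lpthread.

/* Userspace analogue of the get_random_once() idea: seed a hash key the
 * first time any caller needs it, keep later calls cheap.  Illustrative
 * only; the kernel patches the check out with a static key instead of
 * using pthread_once().
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint32_t demo_seed;
static pthread_once_t demo_seed_once = PTHREAD_ONCE_INIT;

static void demo_seed_init(void)
{
	/* Stand-in for get_random_bytes(); a real program would use
	 * getrandom() or /dev/urandom rather than rand(). */
	srand((unsigned int)time(NULL));
	demo_seed = (uint32_t)rand();
}

static uint32_t demo_hash(uint32_t key)
{
	/* Seed exactly once, on first use - the moral equivalent of
	 * get_random_once(&nf_conntrack_hash_rnd, sizeof(...)). */
	pthread_once(&demo_seed_once, demo_seed_init);

	/* Toy mix step; the kernel hashes the whole tuple with jhash. */
	return (key ^ demo_seed) * 2654435761u;
}

int main(void)
{
	printf("hash(42) = %u\n", demo_hash(42));
	printf("hash(43) = %u\n", demo_hash(43));
	return 0;
}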
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -289,8 +289,6 @@ struct kernel_param;
 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
 extern unsigned int nf_conntrack_htable_size;
 extern unsigned int nf_conntrack_max;
-extern unsigned int nf_conntrack_hash_rnd;
-void init_nf_conntrack_hash_rnd(void);
 
 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
 				 const struct nf_conntrack_zone *zone,
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -142,13 +142,14 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 
-unsigned int nf_conntrack_hash_rnd __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
+static unsigned int nf_conntrack_hash_rnd __read_mostly;
 
 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
 {
 	unsigned int n;
 
+	get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
+
 	/* The direction must be ignored, so we hash everything up to the
 	 * destination ports (which is a multiple of 4) and treat the last
 	 * three bytes manually.
@@ -815,21 +816,6 @@ restart:
 	return dropped;
 }
 
-void init_nf_conntrack_hash_rnd(void)
-{
-	unsigned int rand;
-
-	/*
-	 * Why not initialize nf_conntrack_rnd in a "init()" function ?
-	 * Because there isn't enough entropy when system initializing,
-	 * and we initialize it as late as possible.
-	 */
-	do {
-		get_random_bytes(&rand, sizeof(rand));
-	} while (!rand);
-	cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
-}
-
 static struct nf_conn *
 __nf_conntrack_alloc(struct net *net,
 		     const struct nf_conntrack_zone *zone,
@@ -839,12 +825,6 @@ __nf_conntrack_alloc(struct net *net,
 {
 	struct nf_conn *ct;
 
-	if (unlikely(!nf_conntrack_hash_rnd)) {
-		init_nf_conntrack_hash_rnd();
-		/* recompute the hash as nf_conntrack_hash_rnd is initialized */
-		hash = hash_conntrack_raw(orig);
-	}
-
 	/* We don't want any race condition at early drop stage */
 	atomic_inc(&net->ct.count);
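The removed init_nf_conntrack_hash_rnd() shows why the pre-patch scheme needed both a retry loop and a rehash in __nf_conntrack_alloc(): zero served as the "not yet seeded" sentinel, so the seed had to be drawn until non-zero, racing callers settled the winner with cmpxchg(), and the tuple hash had to be recomputed after seeding. The sketch below is a hedged userspace analogue of that older pattern (invented names, rand() standing in for get_random_bytes()), for comparison with the once-based version above.

/* Userspace sketch of the pattern the patch removes: treat a zero seed as
 * "unset", retry until a non-zero value is drawn, and let racing callers
 * settle the winner with a compare-and-swap.  Illustrative only.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic uint32_t legacy_seed; /* 0 means "unset", as nf_conntrack_hash_rnd did */

static void legacy_seed_init(void)
{
	uint32_t rnd;
	uint32_t expected = 0;

	/* Zero doubles as the sentinel, so it can never be the seed. */
	do {
		rnd = (uint32_t)rand();
	} while (!rnd);

	/* Only the first caller wins; everyone else keeps the winner's value. */
	atomic_compare_exchange_strong(&legacy_seed, &expected, rnd);
}

static uint32_t legacy_hash(uint32_t key)
{
	if (atomic_load(&legacy_seed) == 0)
		legacy_seed_init(); /* callers then also had to re-hash, as in the removed hunk */

	/* Toy mix step, as in the first sketch. */
	return (key ^ atomic_load(&legacy_seed)) * 2654435761u;
}

int main(void)
{
	srand(1);
	printf("hash(42) = %u\n", legacy_hash(42));
	return 0;
}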