net/flowcache: Convert to hotplug state machine
Install the callbacks via the state machine. Use multi-state support to avoid custom list handling for the multiple instances.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Steffen Klassert <steffen.klassert@secunet.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: netdev@vger.kernel.org
Cc: rt@linutronix.de
Cc: "David S. Miller" <davem@davemloft.net>
Link: http://lkml.kernel.org/r/20161103145021.28528-10-bigeasy@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit: a4fc1bfc42 (parent: f0bf90def3)
@ -56,6 +56,7 @@ enum cpuhp_state {
|
|||
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
|
||||
CPUHP_SH_SH3X_PREPARE,
|
||||
CPUHP_BLK_MQ_PREPARE,
|
||||
CPUHP_NET_FLOW_PREPARE,
|
||||
CPUHP_TIMERS_DEAD,
|
||||
CPUHP_NOTF_ERR_INJ_PREPARE,
|
||||
CPUHP_MIPS_SOC_PREPARE,
|
||||
|
|
|
@ -239,6 +239,7 @@ struct flow_cache_object *flow_cache_lookup(struct net *net,
|
|||
void *ctx);
|
||||
int flow_cache_init(struct net *net);
|
||||
void flow_cache_fini(struct net *net);
|
||||
void flow_cache_hp_init(void);
|
||||
|
||||
void flow_cache_flush(struct net *net);
|
||||
void flow_cache_flush_deferred(struct net *net);
|
||||
|
|
|
@ -17,7 +17,7 @@ struct flow_cache_percpu {
|
|||
struct flow_cache {
|
||||
u32 hash_shift;
|
||||
struct flow_cache_percpu __percpu *percpu;
|
||||
struct notifier_block hotcpu_notifier;
|
||||
struct hlist_node node;
|
||||
int low_watermark;
|
||||
int high_watermark;
|
||||
struct timer_list rnd_timer;
|
||||
|
|
|
@ -419,28 +419,20 @@ static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int flow_cache_cpu(struct notifier_block *nfb,
|
||||
unsigned long action,
|
||||
void *hcpu)
|
||||
static int flow_cache_cpu_up_prep(unsigned int cpu, struct hlist_node *node)
|
||||
{
|
||||
struct flow_cache *fc = container_of(nfb, struct flow_cache,
|
||||
hotcpu_notifier);
|
||||
int res, cpu = (unsigned long) hcpu;
|
||||
struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
|
||||
|
||||
return flow_cache_cpu_prepare(fc, cpu);
|
||||
}
|
||||
|
||||
static int flow_cache_cpu_dead(unsigned int cpu, struct hlist_node *node)
|
||||
{
|
||||
struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
|
||||
struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
|
||||
|
||||
switch (action) {
|
||||
case CPU_UP_PREPARE:
|
||||
case CPU_UP_PREPARE_FROZEN:
|
||||
res = flow_cache_cpu_prepare(fc, cpu);
|
||||
if (res)
|
||||
return notifier_from_errno(res);
|
||||
break;
|
||||
case CPU_DEAD:
|
||||
case CPU_DEAD_FROZEN:
|
||||
__flow_cache_shrink(fc, fcp, 0);
|
||||
break;
|
||||
}
|
||||
return NOTIFY_OK;
|
||||
__flow_cache_shrink(fc, fcp, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int flow_cache_init(struct net *net)
|
||||
|
@ -467,18 +459,8 @@ int flow_cache_init(struct net *net)
|
|||
if (!fc->percpu)
|
||||
return -ENOMEM;
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(i) {
|
||||
if (flow_cache_cpu_prepare(fc, i))
|
||||
goto err;
|
||||
}
|
||||
fc->hotcpu_notifier = (struct notifier_block){
|
||||
.notifier_call = flow_cache_cpu,
|
||||
};
|
||||
__register_hotcpu_notifier(&fc->hotcpu_notifier);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
if (cpuhp_state_add_instance(CPUHP_NET_FLOW_PREPARE, &fc->node))
|
||||
goto err;
|
||||
|
||||
setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
|
||||
(unsigned long) fc);
|
||||
|
@ -494,8 +476,6 @@ err:
|
|||
fcp->hash_table = NULL;
|
||||
}
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
free_percpu(fc->percpu);
|
||||
fc->percpu = NULL;
|
||||
|
||||
|
@ -509,7 +489,8 @@ void flow_cache_fini(struct net *net)
|
|||
struct flow_cache *fc = &net->xfrm.flow_cache_global;
|
||||
|
||||
del_timer_sync(&fc->rnd_timer);
|
||||
unregister_hotcpu_notifier(&fc->hotcpu_notifier);
|
||||
|
||||
cpuhp_state_remove_instance_nocalls(CPUHP_NET_FLOW_PREPARE, &fc->node);
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
|
||||
|
@ -521,3 +502,14 @@ void flow_cache_fini(struct net *net)
|
|||
fc->percpu = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(flow_cache_fini);
|
||||
|
||||
/*
 * Register the multi-instance CPU hotplug state shared by all
 * flow_cache instances.  Called once at boot from xfrm_init();
 * each cache then attaches itself via cpuhp_state_add_instance()
 * in flow_cache_init().
 *
 * Registration of a compile-time-allocated state can only fail on
 * programming error (e.g. duplicate state), hence WARN_ON() rather
 * than error propagation.
 */
void __init flow_cache_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_NET_FLOW_PREPARE,
				      "net/flow:prepare",
				      flow_cache_cpu_up_prep,
				      flow_cache_cpu_dead);
	WARN_ON(ret < 0);
}
|
||||
|
|
|
@ -3111,6 +3111,7 @@ static struct pernet_operations __net_initdata xfrm_net_ops = {
|
|||
|
||||
void __init xfrm_init(void)
|
||||
{
|
||||
flow_cache_hp_init();
|
||||
register_pernet_subsys(&xfrm_net_ops);
|
||||
seqcount_init(&xfrm_policy_hash_generation);
|
||||
xfrm_input_init();
|
||||
|
|
Loading…
Reference in New Issue