From ec3ed293e76656c1fb5388249e1e61c7d274abfc Mon Sep 17 00:00:00 2001
From: Vlad Buslov <vladbu@mellanox.com>
Date: Wed, 19 Sep 2018 16:37:29 -0700
Subject: [PATCH] net_sched: change tcf_del_walker() to take idrinfo->lock

Action API was changed to work with actions and action_idr in
concurrency safe manner, however tcf_del_walker() still uses actions
without taking a reference or idrinfo->lock first, and deletes them
directly, disregarding possible concurrent delete.

Change tcf_del_walker() to take idrinfo->lock while iterating over
actions and use new tcf_idr_release_unsafe() to release them while
holding the lock.

And the blocking function fl_hw_destroy_tmplt() could be called when
we put a filter chain, so defer it to a work queue.

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
[xiyou.wangcong@gmail.com: heavily modify the code and changelog]
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 net/sched/act_api.c    | 20 +++++++++++++++++++-
 net/sched/cls_flower.c | 13 +++++++++++--
 2 files changed, 30 insertions(+), 3 deletions(-)

diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 6f118d62c731..fac8c769454f 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -246,6 +246,20 @@ nla_put_failure:
 	goto done;
 }
 
+static int tcf_idr_release_unsafe(struct tc_action *p)
+{
+	if (atomic_read(&p->tcfa_bindcnt) > 0)
+		return -EPERM;
+
+	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
+		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
+		tcf_action_cleanup(p);
+		return ACT_P_DELETED;
+	}
+
+	return 0;
+}
+
 static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 			  const struct tc_action_ops *ops)
 {
@@ -262,15 +276,19 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 	if (nla_put_string(skb, TCA_KIND, ops->kind))
 		goto nla_put_failure;
 
+	spin_lock(&idrinfo->lock);
 	idr_for_each_entry_ul(idr, p, id) {
-		ret = __tcf_idr_release(p, false, true);
+		ret = tcf_idr_release_unsafe(p);
 		if (ret == ACT_P_DELETED) {
 			module_put(ops->owner);
 			n_i++;
 		} else if (ret < 0) {
+			spin_unlock(&idrinfo->lock);
 			goto nla_put_failure;
 		}
 	}
+	spin_unlock(&idrinfo->lock);
+
 	if (nla_put_u32(skb, TCA_FCNT, n_i))
 		goto nla_put_failure;
 	nla_nest_end(skb, nest);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 9aada2d0ef06..92dd5071a708 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -79,6 +79,7 @@ struct fl_flow_tmplt {
 	struct fl_flow_key mask;
 	struct flow_dissector dissector;
 	struct tcf_chain *chain;
+	struct rcu_work rwork;
 };
 
 struct cls_fl_head {
@@ -1437,12 +1438,20 @@ errout_tb:
 	return ERR_PTR(err);
 }
 
+static void fl_tmplt_destroy_work(struct work_struct *work)
+{
+	struct fl_flow_tmplt *tmplt = container_of(to_rcu_work(work),
+						struct fl_flow_tmplt, rwork);
+
+	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
+	kfree(tmplt);
+}
+
 static void fl_tmplt_destroy(void *tmplt_priv)
 {
 	struct fl_flow_tmplt *tmplt = tmplt_priv;
 
-	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
-	kfree(tmplt);
+	tcf_queue_work(&tmplt->rwork, fl_tmplt_destroy_work);
 }
 
 static int fl_dump_key_val(struct sk_buff *skb,