net: sched: act_skbmod: remove dependency on rtnl lock

Move the read of the skbmod_p rcu pointer under the tcf spinlock. Use the tcf
spinlock to protect the private skbmod data from concurrent modification during
dump.

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit c8814552fe
parent 5e48180ed8
Author: Vlad Buslov <vladbu@mellanox.com>, 2018-08-10 20:51:49 +03:00
Committer: David S. Miller <davem@davemloft.net>

 net/sched/act_skbmod.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -156,7 +156,6 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 
 	d = to_skbmod(*a);
 
-	ASSERT_RTNL();
 	p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
 	if (unlikely(!p)) {
 		tcf_idr_release(*a, bind);
@@ -166,10 +165,10 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 	p->flags = lflags;
 	d->tcf_action = parm->action;
 
-	p_old = rtnl_dereference(d->skbmod_p);
-
 	if (ovr)
 		spin_lock_bh(&d->tcf_lock);
+	/* Protected by tcf_lock if overwriting existing action. */
+	p_old = rcu_dereference_protected(d->skbmod_p, 1);
 
 	if (lflags & SKBMOD_F_DMAC)
 		ether_addr_copy(p->eth_dst, daddr);
@@ -205,15 +204,18 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
 {
 	struct tcf_skbmod *d = to_skbmod(a);
 	unsigned char *b = skb_tail_pointer(skb);
-	struct tcf_skbmod_params *p = rtnl_dereference(d->skbmod_p);
+	struct tcf_skbmod_params *p;
 	struct tc_skbmod opt = {
 		.index = d->tcf_index,
 		.refcnt = refcount_read(&d->tcf_refcnt) - ref,
 		.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
-		.action = d->tcf_action,
 	};
 	struct tcf_t t;
 
+	spin_lock_bh(&d->tcf_lock);
+	opt.action = d->tcf_action;
+	p = rcu_dereference_protected(d->skbmod_p,
+				      lockdep_is_held(&d->tcf_lock));
 	opt.flags = p->flags;
 	if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
@@ -231,8 +233,10 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
 	if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
 		goto nla_put_failure;
 
+	spin_unlock_bh(&d->tcf_lock);
 	return skb->len;
 nla_put_failure:
+	spin_unlock_bh(&d->tcf_lock);
 	nlmsg_trim(skb, b);
 	return -1;
 }
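
For readers less familiar with this locking pattern, here is a rough, self-contained user-space sketch of what the patch does: the update path swaps in a new parameter block while holding the per-action lock, and the dump path snapshots the action and parameters under the same lock instead of relying on RTNL. This is only an illustration, not kernel code; the names (demo_action, demo_update, demo_dump) are made up, a pthread spinlock stands in for the kernel's tcf_lock, and a plain free() stands in for the RCU-deferred kfree.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_params {
	unsigned long flags;		/* stands in for tcf_skbmod_params */
};

struct demo_action {
	pthread_spinlock_t lock;	/* stands in for d->tcf_lock */
	struct demo_params *params;	/* stands in for d->skbmod_p */
	int action;			/* stands in for d->tcf_action */
};

/* Update path: swap in a new parameter block under the lock. */
static void demo_update(struct demo_action *d, struct demo_params *new_p,
			int action)
{
	struct demo_params *old_p;

	pthread_spin_lock(&d->lock);
	d->action = action;
	old_p = d->params;	/* plain read is safe while the lock is held */
	d->params = new_p;
	pthread_spin_unlock(&d->lock);

	free(old_p);		/* the kernel defers this with kfree_rcu() */
}

/* Dump path: snapshot action and flags under the same lock. */
static void demo_dump(const char *tag, struct demo_action *d)
{
	unsigned long flags;
	int action;

	pthread_spin_lock(&d->lock);
	action = d->action;
	flags = d->params->flags;
	pthread_spin_unlock(&d->lock);

	printf("%s: action=%d flags=%#lx\n", tag, action, flags);
}

int main(void)
{
	struct demo_action d = { .action = 0, .params = NULL };
	struct demo_params *p = calloc(1, sizeof(*p));
	struct demo_params *p2 = calloc(1, sizeof(*p2));

	pthread_spin_init(&d.lock, PTHREAD_PROCESS_PRIVATE);
	p->flags = 0x1;
	d.params = p;

	demo_dump("before", &d);

	p2->flags = 0x3;
	demo_update(&d, p2, 1);	/* replaces p with p2 under the lock */

	demo_dump("after", &d);

	free(d.params);
	pthread_spin_destroy(&d.lock);
	return 0;
}

In the kernel patch the same idea is expressed with spin_lock_bh() plus rcu_dereference_protected(): datapath readers keep using plain RCU, while init and dump no longer depend on the RTNL mutex.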