Merge branch 'net_sched-fix-races-with-RCU-callbacks'
Cong Wang says:

====================
net_sched: fix races with RCU callbacks

Recently, the RCU callbacks used in TC filters and TC actions have kept
drawing my attention; they introduce at least 4 race condition bugs:

1. A simple one fixed by Daniel:

   commit c78e1746d3
   Author: Daniel Borkmann <daniel@iogearbox.net>
   Date:   Wed May 20 17:13:33 2015 +0200

       net: sched: fix call_rcu() race on classifier module unloads

2. A very nasty one fixed by me:

   commit 1697c4bb52
   Author: Cong Wang <xiyou.wangcong@gmail.com>
   Date:   Mon Sep 11 16:33:32 2017 -0700

       net_sched: carefully handle tcf_block_put()

3. Two more bugs found by Chris:

   https://patchwork.ozlabs.org/patch/826696/
   https://patchwork.ozlabs.org/patch/826695/

Usually RCU callbacks are simple; however, for TC filters and actions
they are complex, because at least TC actions could be destroyed
together with the TC filter in one callback. RCU callbacks are also
invoked in BH context, and without locking they run in parallel. All of
this contributes to these nasty bugs.

Alternatively, we could also:

a) Introduce a spinlock to serialize these RCU callbacks. But as I said
   in commit 1697c4bb52 ("net_sched: carefully handle tcf_block_put()"),
   this is very hard to do because of tcf_chain_dump(); potentially a
   lot of work would be needed to make it possible, if it is possible
   at all.

b) Just get rid of these RCU callbacks, because they are not necessary
   at all: callers of these call_rcu() are all on slow paths and hold
   the RTNL lock, so blocking is allowed in their contexts. However,
   David and Eric dislike adding synchronize_rcu() here.

As suggested by Paul, we can instead defer the work to a workqueue and
regain the permission to hold RTNL, without any performance impact.
However, in tcf_block_put() we could deadlock by flushing the workqueue
while holding the RTNL lock; the trick here is to defer that work itself
to the workqueue too, queued after all other works, so the ordering is
preserved and any use-after-free is avoided. Please see the first patch
for details.

Patch 1 introduces the infrastructure, patches 2~12 move each TC filter
to the new TC filter workqueue, patch 13 adds an assertion to catch
potential bugs like this, patch 14 closes another RCU callback race,
and patches 15 and 16 add new test cases.
====================

Reported-by: Chris Mi <chrism@mellanox.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Jiri Pirko <jiri@resnulli.us>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6c325f4eca
include/net/pkt_cls.h
@@ -2,6 +2,7 @@
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

@@ -17,6 +18,8 @@ struct tcf_walker {
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

bool tcf_queue_work(struct work_struct *work);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                bool create);

include/net/sch_generic.h
@@ -10,6 +10,7 @@
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

@@ -271,6 +272,7 @@ struct tcf_chain {

struct tcf_block {
    struct list_head chain_list;
    struct work_struct work;
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)

net/sched/act_sample.c
@@ -264,6 +264,7 @@ static int __init sample_init_module(void)

static void __exit sample_cleanup_module(void)
{
    rcu_barrier();
    tcf_unregister_action(&act_sample_ops, &sample_net_ops);
}

net/sched/cls_api.c
@@ -77,6 +77,8 @@ out:
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
    struct tcf_proto_ops *t;

@@ -86,6 +88,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
     * tcf_proto_ops's destroy() handler.
     */
    rcu_barrier();
    flush_workqueue(tc_filter_wq);

    write_lock(&cls_mod_lock);
    list_for_each_entry(t, &tcf_proto_base, head) {

@@ -100,6 +103,12 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct work_struct *work)
{
    return queue_work(tc_filter_wq, work);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)

@@ -266,23 +275,30 @@ err_chain_create:
}
EXPORT_SYMBOL(tcf_block_get);

void tcf_block_put(struct tcf_block *block)
static void tcf_block_put_final(struct work_struct *work)
{
    struct tcf_block *block = container_of(work, struct tcf_block, work);
    struct tcf_chain *chain, *tmp;

    if (!block)
        return;
    /* At this point, all the chains should have refcnt == 1. */
    rtnl_lock();
    list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
        tcf_chain_put(chain);
    rtnl_unlock();
    kfree(block);
}

    /* XXX: Standalone actions are not allowed to jump to any chain, and
     * bound actions should be all removed after flushing. However,
     * filters are destroyed in RCU callbacks, we have to hold the chains
     * first, otherwise we would always race with RCU callbacks on this list
     * without proper locking.
     */

    /* Wait for existing RCU callbacks to cool down. */
    rcu_barrier();
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing. However, filters are destroyed
 * in RCU callbacks, we have to hold the chains first, otherwise we would
 * always race with RCU callbacks on this list without proper locking.
 */
static void tcf_block_put_deferred(struct work_struct *work)
{
    struct tcf_block *block = container_of(work, struct tcf_block, work);
    struct tcf_chain *chain;

    rtnl_lock();
    /* Hold a refcnt for all chains, except 0, in case they are gone. */
    list_for_each_entry(chain, &block->chain_list, list)
        if (chain->index)

@@ -292,13 +308,27 @@ void tcf_block_put(struct tcf_block *block)
    list_for_each_entry(chain, &block->chain_list, list)
        tcf_chain_flush(chain);

    /* Wait for RCU callbacks to release the reference count. */
    INIT_WORK(&block->work, tcf_block_put_final);
    /* Wait for RCU callbacks to release the reference count and make
     * sure their works have been queued before this.
     */
    rcu_barrier();
    tcf_queue_work(&block->work);
    rtnl_unlock();
}

    /* At this point, all the chains should have refcnt == 1. */
    list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
        tcf_chain_put(chain);
    kfree(block);
void tcf_block_put(struct tcf_block *block)
{
    if (!block)
        return;

    INIT_WORK(&block->work, tcf_block_put_deferred);
    /* Wait for existing RCU callbacks to cool down, make sure their works
     * have been queued before this. We can not flush pending works here
     * because we are holding the RTNL lock.
     */
    rcu_barrier();
    tcf_queue_work(&block->work);
}
EXPORT_SYMBOL(tcf_block_put);

@@ -879,6 +909,7 @@ void tcf_exts_destroy(struct tcf_exts *exts)
#ifdef CONFIG_NET_CLS_ACT
    LIST_HEAD(actions);

    ASSERT_RTNL();
    tcf_exts_to_list(exts, &actions);
    tcf_action_destroy(&actions, TCA_ACT_UNBIND);
    kfree(exts->actions);

@@ -1030,6 +1061,10 @@ EXPORT_SYMBOL(tcf_exts_get_dev);

static int __init tc_filter_init(void)
{
    tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
    if (!tc_filter_wq)
        return -ENOMEM;

    rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
    rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
    rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,

net/sched/cls_basic.c
@@ -34,7 +34,10 @@ struct basic_filter {
    struct tcf_result res;
    struct tcf_proto *tp;
    struct list_head link;
    struct rcu_head rcu;
    union {
        struct work_struct work;
        struct rcu_head rcu;
    };
};

static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,

@@ -82,13 +85,24 @@ static int basic_init(struct tcf_proto *tp)
    return 0;
}

static void basic_delete_filter_work(struct work_struct *work)
{
    struct basic_filter *f = container_of(work, struct basic_filter, work);

    rtnl_lock();
    tcf_exts_destroy(&f->exts);
    tcf_em_tree_destroy(&f->ematches);
    rtnl_unlock();

    kfree(f);
}

static void basic_delete_filter(struct rcu_head *head)
{
    struct basic_filter *f = container_of(head, struct basic_filter, rcu);

    tcf_exts_destroy(&f->exts);
    tcf_em_tree_destroy(&f->ematches);
    kfree(f);
    INIT_WORK(&f->work, basic_delete_filter_work);
    tcf_queue_work(&f->work);
}

static void basic_destroy(struct tcf_proto *tp)

net/sched/cls_bpf.c
@@ -49,7 +49,10 @@ struct cls_bpf_prog {
    struct sock_filter *bpf_ops;
    const char *bpf_name;
    struct tcf_proto *tp;
    struct rcu_head rcu;
    union {
        struct work_struct work;
        struct rcu_head rcu;
    };
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {

@@ -257,9 +260,21 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
    kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
    struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);

    rtnl_lock();
    __cls_bpf_delete_prog(prog);
    rtnl_unlock();
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
    __cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
    struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

    INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
    tcf_queue_work(&prog->work);
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)

net/sched/cls_cgroup.c
@@ -23,7 +23,10 @@ struct cls_cgroup_head {
    struct tcf_exts exts;
    struct tcf_ematch_tree ematches;
    struct tcf_proto *tp;
    struct rcu_head rcu;
    union {
        struct work_struct work;
        struct rcu_head rcu;
    };
};

static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,

@@ -57,15 +60,26 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
    [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
};

static void cls_cgroup_destroy_work(struct work_struct *work)
{
    struct cls_cgroup_head *head = container_of(work,
                                                 struct cls_cgroup_head,
                                                 work);
    rtnl_lock();
    tcf_exts_destroy(&head->exts);
    tcf_em_tree_destroy(&head->ematches);
    kfree(head);
    rtnl_unlock();
}

static void cls_cgroup_destroy_rcu(struct rcu_head *root)
{
    struct cls_cgroup_head *head = container_of(root,
                                                 struct cls_cgroup_head,
                                                 rcu);

    tcf_exts_destroy(&head->exts);
    tcf_em_tree_destroy(&head->ematches);
    kfree(head);
    INIT_WORK(&head->work, cls_cgroup_destroy_work);
    tcf_queue_work(&head->work);
}

static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,

net/sched/cls_flow.c
@@ -57,7 +57,10 @@ struct flow_filter {
    u32 divisor;
    u32 baseclass;
    u32 hashrnd;
    struct rcu_head rcu;
    union {
        struct work_struct work;
        struct rcu_head rcu;
    };
};

static inline u32 addr_fold(void *addr)

@@ -369,14 +372,24 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
    [TCA_FLOW_PERTURB] = { .type = NLA_U32 },
};

static void flow_destroy_filter(struct rcu_head *head)
static void flow_destroy_filter_work(struct work_struct *work)
{
    struct flow_filter *f = container_of(head, struct flow_filter, rcu);
    struct flow_filter *f = container_of(work, struct flow_filter, work);

    rtnl_lock();
    del_timer_sync(&f->perturb_timer);
    tcf_exts_destroy(&f->exts);
    tcf_em_tree_destroy(&f->ematches);
    kfree(f);
    rtnl_unlock();
}

static void flow_destroy_filter(struct rcu_head *head)
{
    struct flow_filter *f = container_of(head, struct flow_filter, rcu);

    INIT_WORK(&f->work, flow_destroy_filter_work);
    tcf_queue_work(&f->work);
}

static int flow_change(struct net *net, struct sk_buff *in_skb,

net/sched/cls_flower.c
@@ -87,7 +87,10 @@ struct cls_fl_filter {
    struct list_head list;
    u32 handle;
    u32 flags;
    struct rcu_head rcu;
    union {
        struct work_struct work;
        struct rcu_head rcu;
    };
    struct net_device *hw_dev;
};

@@ -215,12 +218,22 @@ static int fl_init(struct tcf_proto *tp)
    return 0;
}

static void fl_destroy_filter_work(struct work_struct *work)
{
    struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);

    rtnl_lock();
    tcf_exts_destroy(&f->exts);
    kfree(f);
    rtnl_unlock();
}

static void fl_destroy_filter(struct rcu_head *head)
{
    struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

    tcf_exts_destroy(&f->exts);
    kfree(f);
    INIT_WORK(&f->work, fl_destroy_filter_work);
    tcf_queue_work(&f->work);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)

net/sched/cls_fw.c
@@ -46,7 +46,10 @@ struct fw_filter {
#endif /* CONFIG_NET_CLS_IND */
    struct tcf_exts exts;
    struct tcf_proto *tp;
    struct rcu_head rcu;
    union {
        struct work_struct work;
        struct rcu_head rcu;
    };
};

static u32 fw_hash(u32 handle)

@@ -119,12 +122,22 @@ static int fw_init(struct tcf_proto *tp)
    return 0;
}

static void fw_delete_filter_work(struct work_struct *work)
{
    struct fw_filter *f = container_of(work, struct fw_filter, work);

    rtnl_lock();
    tcf_exts_destroy(&f->exts);
    kfree(f);
    rtnl_unlock();
}

static void fw_delete_filter(struct rcu_head *head)
{
    struct fw_filter *f = container_of(head, struct fw_filter, rcu);

    tcf_exts_destroy(&f->exts);
    kfree(f);
    INIT_WORK(&f->work, fw_delete_filter_work);
    tcf_queue_work(&f->work);
}

static void fw_destroy(struct tcf_proto *tp)

net/sched/cls_matchall.c
@@ -21,7 +21,10 @@ struct cls_mall_head {
    struct tcf_result res;
    u32 handle;
    u32 flags;
    struct rcu_head rcu;
    union {
        struct work_struct work;
        struct rcu_head rcu;
    };
};

static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,

@@ -41,13 +44,23 @@ static int mall_init(struct tcf_proto *tp)
    return 0;
}

static void mall_destroy_work(struct work_struct *work)
{
    struct cls_mall_head *head = container_of(work, struct cls_mall_head,
                                               work);
    rtnl_lock();
    tcf_exts_destroy(&head->exts);
    kfree(head);
    rtnl_unlock();
}

static void mall_destroy_rcu(struct rcu_head *rcu)
{
    struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
                                               rcu);

    tcf_exts_destroy(&head->exts);
    kfree(head);
    INIT_WORK(&head->work, mall_destroy_work);
    tcf_queue_work(&head->work);
}

static int mall_replace_hw_filter(struct tcf_proto *tp,

net/sched/cls_route.c
@@ -57,7 +57,10 @@ struct route4_filter {
    u32 handle;
    struct route4_bucket *bkt;
    struct tcf_proto *tp;
    struct rcu_head rcu;
    union {
        struct work_struct work;
        struct rcu_head rcu;
    };
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

@@ -254,12 +257,22 @@ static int route4_init(struct tcf_proto *tp)
    return 0;
}

static void route4_delete_filter_work(struct work_struct *work)
{
    struct route4_filter *f = container_of(work, struct route4_filter, work);

    rtnl_lock();
    tcf_exts_destroy(&f->exts);
    kfree(f);
    rtnl_unlock();
}

static void route4_delete_filter(struct rcu_head *head)
{
    struct route4_filter *f = container_of(head, struct route4_filter, rcu);

    tcf_exts_destroy(&f->exts);
    kfree(f);
    INIT_WORK(&f->work, route4_delete_filter_work);
    tcf_queue_work(&f->work);
}

static void route4_destroy(struct tcf_proto *tp)

net/sched/cls_rsvp.h
@@ -97,7 +97,10 @@ struct rsvp_filter {

    u32 handle;
    struct rsvp_session *sess;
    struct rcu_head rcu;
    union {
        struct work_struct work;
        struct rcu_head rcu;
    };
};

static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)

@@ -282,12 +285,22 @@ static int rsvp_init(struct tcf_proto *tp)
    return -ENOBUFS;
}

static void rsvp_delete_filter_work(struct work_struct *work)
{
    struct rsvp_filter *f = container_of(work, struct rsvp_filter, work);

    rtnl_lock();
    tcf_exts_destroy(&f->exts);
    kfree(f);
    rtnl_unlock();
}

static void rsvp_delete_filter_rcu(struct rcu_head *head)
{
    struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);

    tcf_exts_destroy(&f->exts);
    kfree(f);
    INIT_WORK(&f->work, rsvp_delete_filter_work);
    tcf_queue_work(&f->work);
}

static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)

net/sched/cls_tcindex.c
@@ -27,14 +27,20 @@
struct tcindex_filter_result {
    struct tcf_exts exts;
    struct tcf_result res;
    struct rcu_head rcu;
    union {
        struct work_struct work;
        struct rcu_head rcu;
    };
};

struct tcindex_filter {
    u16 key;
    struct tcindex_filter_result result;
    struct tcindex_filter __rcu *next;
    struct rcu_head rcu;
    union {
        struct work_struct work;
        struct rcu_head rcu;
    };
};

@@ -133,12 +139,34 @@ static int tcindex_init(struct tcf_proto *tp)
    return 0;
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
    struct tcindex_filter_result *r;

    r = container_of(work, struct tcindex_filter_result, work);
    rtnl_lock();
    tcf_exts_destroy(&r->exts);
    rtnl_unlock();
}

static void tcindex_destroy_rexts(struct rcu_head *head)
{
    struct tcindex_filter_result *r;

    r = container_of(head, struct tcindex_filter_result, rcu);
    tcf_exts_destroy(&r->exts);
    INIT_WORK(&r->work, tcindex_destroy_rexts_work);
    tcf_queue_work(&r->work);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
    struct tcindex_filter *f = container_of(work, struct tcindex_filter,
                                             work);

    rtnl_lock();
    tcf_exts_destroy(&f->result.exts);
    kfree(f);
    rtnl_unlock();
}

static void tcindex_destroy_fexts(struct rcu_head *head)

@@ -146,8 +174,8 @@ static void tcindex_destroy_fexts(struct rcu_head *head)
    struct tcindex_filter *f = container_of(head, struct tcindex_filter,
                                             rcu);

    tcf_exts_destroy(&f->result.exts);
    kfree(f);
    INIT_WORK(&f->work, tcindex_destroy_fexts_work);
    tcf_queue_work(&f->work);
}

static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)

net/sched/cls_u32.c
@@ -68,7 +68,10 @@ struct tc_u_knode {
    u32 __percpu *pcpu_success;
#endif
    struct tcf_proto *tp;
    struct rcu_head rcu;
    union {
        struct work_struct work;
        struct rcu_head rcu;
    };
    /* The 'sel' field MUST be the last field in structure to allow for
     * tc_u32_keys allocated at end of structure.
     */

@@ -418,11 +421,21 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
 * this the u32_delete_key_rcu variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
    struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);

    rtnl_lock();
    u32_destroy_key(key->tp, key, false);
    rtnl_unlock();
}

static void u32_delete_key_rcu(struct rcu_head *rcu)
{
    struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

    u32_destroy_key(key->tp, key, false);
    INIT_WORK(&key->work, u32_delete_key_work);
    tcf_queue_work(&key->work);
}

/* u32_delete_key_freepf_rcu is the rcu callback variant

@@ -432,11 +445,21 @@ static void u32_delete_key_rcu(struct rcu_head *rcu)
 * for the variant that should be used with keys return from
 * u32_init_knode()
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
    struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);

    rtnl_lock();
    u32_destroy_key(key->tp, key, true);
    rtnl_unlock();
}

static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
{
    struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

    u32_destroy_key(key->tp, key, true);
    INIT_WORK(&key->work, u32_delete_key_freepf_work);
    tcf_queue_work(&key->work);
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)

tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
@@ -17,5 +17,26 @@
        "teardown": [
            "$TC qdisc del dev $DEV1 ingress"
        ]
    },
    {
        "id": "d052",
        "name": "Add 1M filters with the same action",
        "category": [
            "filter",
            "flower"
        ],
        "setup": [
            "$TC qdisc add dev $DEV2 ingress",
            "./tdc_batch.py $DEV2 $BATCH_FILE --share_action -n 1000000"
        ],
        "cmdUnderTest": "$TC -b $BATCH_FILE",
        "expExitCode": "0",
        "verifyCmd": "$TC actions list action gact",
        "matchPattern": "action order 0: gact action drop.*index 1 ref 1000000 bind 1000000",
        "matchCount": "1",
        "teardown": [
            "$TC qdisc del dev $DEV2 ingress",
            "/bin/rm $BATCH_FILE"
        ]
    }
]
]

tools/testing/selftests/tc-testing/tdc.py
@@ -88,7 +88,7 @@ def prepare_env(cmdlist):
        exit(1)


def test_runner(filtered_tests):
def test_runner(filtered_tests, args):
    """
    Driver function for the unit tests.

@@ -105,6 +105,8 @@ def test_runner(filtered_tests):
    for tidx in testlist:
        result = True
        tresult = ""
        if "flower" in tidx["category"] and args.device == None:
            continue
        print("Test " + tidx["id"] + ": " + tidx["name"])
        prepare_env(tidx["setup"])
        (p, procout) = exec_cmd(tidx["cmdUnderTest"])

@@ -152,6 +154,10 @@ def ns_create():
    exec_cmd(cmd, False)
    cmd = 'ip -s $NS link set $DEV1 up'
    exec_cmd(cmd, False)
    cmd = 'ip link set $DEV2 netns $NS'
    exec_cmd(cmd, False)
    cmd = 'ip -s $NS link set $DEV2 up'
    exec_cmd(cmd, False)


def ns_destroy():

@@ -211,7 +217,8 @@ def set_args(parser):
                        help='Execute the single test case with specified ID')
    parser.add_argument('-i', '--id', action='store_true', dest='gen_id',
                        help='Generate ID numbers for new test cases')
    return parser
    parser.add_argument('-d', '--device',
                        help='Execute the test case in flower category')
    return parser


@@ -225,6 +232,8 @@ def check_default_settings(args):

    if args.path != None:
        NAMES['TC'] = args.path
    if args.device != None:
        NAMES['DEV2'] = args.device
    if not os.path.isfile(NAMES['TC']):
        print("The specified tc path " + NAMES['TC'] + " does not exist.")
        exit(1)

@@ -381,14 +390,17 @@ def set_operation_mode(args):
        if (len(alltests) == 0):
            print("Cannot find a test case with ID matching " + target_id)
            exit(1)
        catresults = test_runner(alltests)
        catresults = test_runner(alltests, args)
        print("All test results: " + "\n\n" + catresults)
    elif (len(target_category) > 0):
        if (target_category == "flower") and args.device == None:
            print("Please specify a NIC device (-d) to run category flower")
            exit(1)
        if (target_category not in ucat):
            print("Specified category is not present in this file.")
            exit(1)
        else:
            catresults = test_runner(testcases[target_category])
            catresults = test_runner(testcases[target_category], args)
            print("Category " + target_category + "\n\n" + catresults)

    ns_destroy()

tools/testing/selftests/tc-testing/tdc_batch.py (new file)
@@ -0,0 +1,62 @@
#!/usr/bin/python3

"""
tdc_batch.py - a script to generate TC batch file

Copyright (C) 2017 Chris Mi <chrism@mellanox.com>
"""

import argparse

parser = argparse.ArgumentParser(description='TC batch file generator')
parser.add_argument("device", help="device name")
parser.add_argument("file", help="batch file name")
parser.add_argument("-n", "--number", type=int,
                    help="how many lines in batch file")
parser.add_argument("-o", "--skip_sw",
                    help="skip_sw (offload), by default skip_hw",
                    action="store_true")
parser.add_argument("-s", "--share_action",
                    help="all filters share the same action",
                    action="store_true")
parser.add_argument("-p", "--prio",
                    help="all filters have different prio",
                    action="store_true")
args = parser.parse_args()

device = args.device
file = open(args.file, 'w')

number = 1
if args.number:
    number = args.number

skip = "skip_hw"
if args.skip_sw:
    skip = "skip_sw"

share_action = ""
if args.share_action:
    share_action = "index 1"

prio = "prio 1"
if args.prio:
    prio = ""
    if number > 0x4000:
        number = 0x4000

index = 0
for i in range(0x100):
    for j in range(0x100):
        for k in range(0x100):
            mac = ("%02x:%02x:%02x" % (i, j, k))
            src_mac = "e4:11:00:" + mac
            dst_mac = "e4:12:00:" + mac
            cmd = ("filter add dev %s %s protocol ip parent ffff: flower %s "
                   "src_mac %s dst_mac %s action drop %s" %
                   (device, prio, skip, src_mac, dst_mac, share_action))
            file.write("%s\n" % cmd)
            index += 1
            if index >= number:
                file.close()
                exit(0)

tools/testing/selftests/tc-testing/tdc_config.py
@@ -12,6 +12,8 @@ NAMES = {
          # Name of veth devices to be created for the namespace
          'DEV0': 'v0p0',
          'DEV1': 'v0p1',
          'DEV2': '',
          'BATCH_FILE': './batch.txt',
          # Name of the namespace to use
          'NS': 'tcut'
        }