net/sched: remove block pointer from common offload structure

Based on feedback from Jiri avoid carrying a pointer to the tcf_block
structure in the tc_cls_common_offload structure. Instead store
a flag in driver private data which indicates if offloads apply
to a shared block at block binding time.

Suggested-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Pieter Jansen van Vuuren 2019-05-06 17:24:21 -07:00 committed by David S. Miller
parent 2e7ae67b5f
commit d6787147e1
8 changed files with 25 additions and 36 deletions

View File

@@ -215,6 +215,7 @@ struct nfp_fl_qos {
* @lag_port_flags: Extended port flags to record lag state of repr * @lag_port_flags: Extended port flags to record lag state of repr
* @mac_offloaded: Flag indicating a MAC address is offloaded for repr * @mac_offloaded: Flag indicating a MAC address is offloaded for repr
* @offloaded_mac_addr: MAC address that has been offloaded for repr * @offloaded_mac_addr: MAC address that has been offloaded for repr
* @block_shared: Flag indicating if offload applies to shared blocks
* @mac_list: List entry of reprs that share the same offloaded MAC * @mac_list: List entry of reprs that share the same offloaded MAC
* @qos_table: Stored info on filters implementing qos * @qos_table: Stored info on filters implementing qos
*/ */
@@ -223,6 +224,7 @@ struct nfp_flower_repr_priv {
unsigned long lag_port_flags; unsigned long lag_port_flags;
bool mac_offloaded; bool mac_offloaded;
u8 offloaded_mac_addr[ETH_ALEN]; u8 offloaded_mac_addr[ETH_ALEN];
bool block_shared;
struct list_head mac_list; struct list_head mac_list;
struct nfp_fl_qos qos_table; struct nfp_fl_qos qos_table;
}; };

View File

@@ -1197,10 +1197,14 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
struct tc_block_offload *f) struct tc_block_offload *f)
{ {
struct nfp_repr *repr = netdev_priv(netdev); struct nfp_repr *repr = netdev_priv(netdev);
struct nfp_flower_repr_priv *repr_priv;
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP; return -EOPNOTSUPP;
repr_priv = repr->app_priv;
repr_priv->block_shared = tcf_block_shared(f->block);
switch (f->command) { switch (f->command) {
case TC_BLOCK_BIND: case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, return tcf_block_cb_register(f->block,

View File

@@ -76,8 +76,9 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
repr = netdev_priv(netdev); repr = netdev_priv(netdev);
repr_priv = repr->app_priv;
if (tcf_block_shared(flow->common.block)) { if (repr_priv->block_shared) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks"); NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
@@ -123,7 +124,6 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
config->cir = cpu_to_be32(rate); config->cir = cpu_to_be32(rate);
nfp_ctrl_tx(repr->app->ctrl, skb); nfp_ctrl_tx(repr->app->ctrl, skb);
repr_priv = repr->app_priv;
repr_priv->qos_table.netdev_port_id = netdev_port_id; repr_priv->qos_table.netdev_port_id = netdev_port_id;
fl_priv->qos_rate_limiters++; fl_priv->qos_rate_limiters++;
if (fl_priv->qos_rate_limiters == 1) if (fl_priv->qos_rate_limiters == 1)

View File

@@ -629,7 +629,6 @@ struct tc_cls_common_offload {
u32 chain_index; u32 chain_index;
__be16 protocol; __be16 protocol;
u32 prio; u32 prio;
struct tcf_block *block;
struct netlink_ext_ack *extack; struct netlink_ext_ack *extack;
}; };
@@ -731,13 +730,11 @@ static inline bool tc_in_hw(u32 flags)
static inline void static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common, tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
const struct tcf_proto *tp, u32 flags, const struct tcf_proto *tp, u32 flags,
struct tcf_block *block,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
cls_common->chain_index = tp->chain->index; cls_common->chain_index = tp->chain->index;
cls_common->protocol = tp->protocol; cls_common->protocol = tp->protocol;
cls_common->prio = tp->prio; cls_common->prio = tp->prio;
cls_common->block = block;
if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE) if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
cls_common->extack = extack; cls_common->extack = extack;
} }

View File

@@ -157,8 +157,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
skip_sw = prog && tc_skip_sw(prog->gen_flags); skip_sw = prog && tc_skip_sw(prog->gen_flags);
obj = prog ?: oldprog; obj = prog ?: oldprog;
tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, block, tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
extack);
cls_bpf.command = TC_CLSBPF_OFFLOAD; cls_bpf.command = TC_CLSBPF_OFFLOAD;
cls_bpf.exts = &obj->exts; cls_bpf.exts = &obj->exts;
cls_bpf.prog = prog ? prog->filter : NULL; cls_bpf.prog = prog ? prog->filter : NULL;
@@ -227,8 +226,7 @@ static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
struct tcf_block *block = tp->chain->block; struct tcf_block *block = tp->chain->block;
struct tc_cls_bpf_offload cls_bpf = {}; struct tc_cls_bpf_offload cls_bpf = {};
tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, block, tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
NULL);
cls_bpf.command = TC_CLSBPF_STATS; cls_bpf.command = TC_CLSBPF_STATS;
cls_bpf.exts = &prog->exts; cls_bpf.exts = &prog->exts;
cls_bpf.prog = prog->filter; cls_bpf.prog = prog->filter;
@@ -670,7 +668,7 @@ static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
continue; continue;
tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
block, extack); extack);
cls_bpf.command = TC_CLSBPF_OFFLOAD; cls_bpf.command = TC_CLSBPF_OFFLOAD;
cls_bpf.exts = &prog->exts; cls_bpf.exts = &prog->exts;
cls_bpf.prog = add ? prog->filter : NULL; cls_bpf.prog = add ? prog->filter : NULL;

View File

@@ -389,8 +389,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
if (!rtnl_held) if (!rtnl_held)
rtnl_lock(); rtnl_lock();
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, block, tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
extack);
cls_flower.command = TC_CLSFLOWER_DESTROY; cls_flower.command = TC_CLSFLOWER_DESTROY;
cls_flower.cookie = (unsigned long) f; cls_flower.cookie = (unsigned long) f;
@@ -423,8 +422,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
goto errout; goto errout;
} }
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, block, tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
extack);
cls_flower.command = TC_CLSFLOWER_REPLACE; cls_flower.command = TC_CLSFLOWER_REPLACE;
cls_flower.cookie = (unsigned long) f; cls_flower.cookie = (unsigned long) f;
cls_flower.rule->match.dissector = &f->mask->dissector; cls_flower.rule->match.dissector = &f->mask->dissector;
@@ -480,8 +478,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
if (!rtnl_held) if (!rtnl_held)
rtnl_lock(); rtnl_lock();
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, block, tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
NULL);
cls_flower.command = TC_CLSFLOWER_STATS; cls_flower.command = TC_CLSFLOWER_STATS;
cls_flower.cookie = (unsigned long) f; cls_flower.cookie = (unsigned long) f;
cls_flower.classid = f->res.classid; cls_flower.classid = f->res.classid;
@@ -1760,7 +1757,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
} }
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
block, extack); extack);
cls_flower.command = add ? cls_flower.command = add ?
TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY; TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
cls_flower.cookie = (unsigned long)f; cls_flower.cookie = (unsigned long)f;

View File

@@ -71,8 +71,7 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
struct tc_cls_matchall_offload cls_mall = {}; struct tc_cls_matchall_offload cls_mall = {};
struct tcf_block *block = tp->chain->block; struct tcf_block *block = tp->chain->block;
tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block, tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
extack);
cls_mall.command = TC_CLSMATCHALL_DESTROY; cls_mall.command = TC_CLSMATCHALL_DESTROY;
cls_mall.cookie = cookie; cls_mall.cookie = cookie;
@@ -94,8 +93,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
if (!cls_mall.rule) if (!cls_mall.rule)
return -ENOMEM; return -ENOMEM;
tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block, tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
extack);
cls_mall.command = TC_CLSMATCHALL_REPLACE; cls_mall.command = TC_CLSMATCHALL_REPLACE;
cls_mall.cookie = cookie; cls_mall.cookie = cookie;
@@ -295,8 +293,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
if (!cls_mall.rule) if (!cls_mall.rule)
return -ENOMEM; return -ENOMEM;
tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block, tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
extack);
cls_mall.command = add ? cls_mall.command = add ?
TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY; TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
cls_mall.cookie = (unsigned long)head; cls_mall.cookie = (unsigned long)head;
@@ -331,8 +328,7 @@ static void mall_stats_hw_filter(struct tcf_proto *tp,
struct tc_cls_matchall_offload cls_mall = {}; struct tc_cls_matchall_offload cls_mall = {};
struct tcf_block *block = tp->chain->block; struct tcf_block *block = tp->chain->block;
tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block, tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
NULL);
cls_mall.command = TC_CLSMATCHALL_STATS; cls_mall.command = TC_CLSMATCHALL_STATS;
cls_mall.cookie = cookie; cls_mall.cookie = cookie;

View File

@@ -485,8 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
struct tcf_block *block = tp->chain->block; struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {}; struct tc_cls_u32_offload cls_u32 = {};
tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, block, tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
extack);
cls_u32.command = TC_CLSU32_DELETE_HNODE; cls_u32.command = TC_CLSU32_DELETE_HNODE;
cls_u32.hnode.divisor = h->divisor; cls_u32.hnode.divisor = h->divisor;
cls_u32.hnode.handle = h->handle; cls_u32.hnode.handle = h->handle;
@@ -504,7 +503,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
bool offloaded = false; bool offloaded = false;
int err; int err;
tc_cls_common_offload_init(&cls_u32.common, tp, flags, block, extack); tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
cls_u32.command = TC_CLSU32_NEW_HNODE; cls_u32.command = TC_CLSU32_NEW_HNODE;
cls_u32.hnode.divisor = h->divisor; cls_u32.hnode.divisor = h->divisor;
cls_u32.hnode.handle = h->handle; cls_u32.hnode.handle = h->handle;
@@ -530,8 +529,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
struct tcf_block *block = tp->chain->block; struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {}; struct tc_cls_u32_offload cls_u32 = {};
tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, block, tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
extack);
cls_u32.command = TC_CLSU32_DELETE_KNODE; cls_u32.command = TC_CLSU32_DELETE_KNODE;
cls_u32.knode.handle = n->handle; cls_u32.knode.handle = n->handle;
@@ -548,7 +546,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
bool skip_sw = tc_skip_sw(flags); bool skip_sw = tc_skip_sw(flags);
int err; int err;
tc_cls_common_offload_init(&cls_u32.common, tp, flags, block, extack); tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
cls_u32.command = TC_CLSU32_REPLACE_KNODE; cls_u32.command = TC_CLSU32_REPLACE_KNODE;
cls_u32.knode.handle = n->handle; cls_u32.knode.handle = n->handle;
cls_u32.knode.fshift = n->fshift; cls_u32.knode.fshift = n->fshift;
@@ -1172,12 +1170,10 @@ static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
bool add, tc_setup_cb_t *cb, void *cb_priv, bool add, tc_setup_cb_t *cb, void *cb_priv,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {}; struct tc_cls_u32_offload cls_u32 = {};
int err; int err;
tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, block, tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
extack);
cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE; cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
cls_u32.hnode.divisor = ht->divisor; cls_u32.hnode.divisor = ht->divisor;
cls_u32.hnode.handle = ht->handle; cls_u32.hnode.handle = ht->handle;
@@ -1199,8 +1195,7 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
struct tc_cls_u32_offload cls_u32 = {}; struct tc_cls_u32_offload cls_u32 = {};
int err; int err;
tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, block, tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
extack);
cls_u32.command = add ? cls_u32.command = add ?
TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE; TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
cls_u32.knode.handle = n->handle; cls_u32.knode.handle = n->handle;