net: sched: Pass qdisc reference in struct flow_block_offload
Previously, shared blocks were only relevant for the pseudo-qdiscs ingress and clsact. Recently, a qevent facility was introduced, which allows binding blocks to well-defined slots of a qdisc instance. RED in particular got two qevents: early_drop and mark. Drivers that wish to offload these blocks will be sent the usual notification, and need to know which qdisc the block is related to. To that end, extend flow_block_offload with a "sch" pointer, and initialize it as appropriate. This prompts changes in the indirect block facility, which now tracks the scheduler in addition to the netdevice. Update the signatures of several functions similarly. Signed-off-by: Petr Machata <petrm@mellanox.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
e1d82f7ad0
commit
c40f4e50b6
|
@ -1888,7 +1888,7 @@ static void bnxt_tc_setup_indr_rel(void *cb_priv)
|
|||
kfree(priv);
|
||||
}
|
||||
|
||||
static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
|
||||
static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct Qdisc *sch, struct bnxt *bp,
|
||||
struct flow_block_offload *f, void *data,
|
||||
void (*cleanup)(struct flow_block_cb *block_cb))
|
||||
{
|
||||
|
@ -1911,7 +1911,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
|
|||
block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
|
||||
cb_priv, cb_priv,
|
||||
bnxt_tc_setup_indr_rel, f,
|
||||
netdev, data, bp, cleanup);
|
||||
netdev, sch, data, bp, cleanup);
|
||||
if (IS_ERR(block_cb)) {
|
||||
list_del(&cb_priv->list);
|
||||
kfree(cb_priv);
|
||||
|
@ -1946,7 +1946,7 @@ static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
|
|||
return netif_is_vxlan(netdev);
|
||||
}
|
||||
|
||||
static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
|
||||
static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
|
||||
enum tc_setup_type type, void *type_data,
|
||||
void *data,
|
||||
void (*cleanup)(struct flow_block_cb *block_cb))
|
||||
|
@ -1956,8 +1956,7 @@ static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
|
|||
|
||||
switch (type) {
|
||||
case TC_SETUP_BLOCK:
|
||||
return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data,
|
||||
cleanup);
|
||||
return bnxt_tc_setup_indr_block(netdev, sch, cb_priv, type_data, data, cleanup);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -404,7 +404,7 @@ static void mlx5e_rep_indr_block_unbind(void *cb_priv)
|
|||
static LIST_HEAD(mlx5e_block_cb_list);
|
||||
|
||||
static int
|
||||
mlx5e_rep_indr_setup_block(struct net_device *netdev,
|
||||
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
|
||||
struct mlx5e_rep_priv *rpriv,
|
||||
struct flow_block_offload *f,
|
||||
flow_setup_cb_t *setup_cb,
|
||||
|
@ -442,7 +442,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
|
|||
|
||||
block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
|
||||
mlx5e_rep_indr_block_unbind,
|
||||
f, netdev, data, rpriv,
|
||||
f, netdev, sch, data, rpriv,
|
||||
cleanup);
|
||||
if (IS_ERR(block_cb)) {
|
||||
list_del(&indr_priv->list);
|
||||
|
@ -472,18 +472,18 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
|
|||
}
|
||||
|
||||
static
|
||||
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
|
||||
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
|
||||
enum tc_setup_type type, void *type_data,
|
||||
void *data,
|
||||
void (*cleanup)(struct flow_block_cb *block_cb))
|
||||
{
|
||||
switch (type) {
|
||||
case TC_SETUP_BLOCK:
|
||||
return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
|
||||
return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
|
||||
mlx5e_rep_indr_setup_tc_cb,
|
||||
data, cleanup);
|
||||
case TC_SETUP_FT:
|
||||
return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
|
||||
return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
|
||||
mlx5e_rep_indr_setup_ft_cb,
|
||||
data, cleanup);
|
||||
default:
|
||||
|
|
|
@ -458,7 +458,7 @@ void nfp_flower_qos_cleanup(struct nfp_app *app);
|
|||
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
|
||||
struct tc_cls_matchall_offload *flow);
|
||||
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
|
||||
int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
|
||||
int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
|
||||
enum tc_setup_type type, void *type_data,
|
||||
void *data,
|
||||
void (*cleanup)(struct flow_block_cb *block_cb));
|
||||
|
|
|
@ -1646,7 +1646,7 @@ void nfp_flower_setup_indr_tc_release(void *cb_priv)
|
|||
}
|
||||
|
||||
static int
|
||||
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
|
||||
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app,
|
||||
struct flow_block_offload *f, void *data,
|
||||
void (*cleanup)(struct flow_block_cb *block_cb))
|
||||
{
|
||||
|
@ -1680,7 +1680,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
|
|||
block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
|
||||
cb_priv, cb_priv,
|
||||
nfp_flower_setup_indr_tc_release,
|
||||
f, netdev, data, app, cleanup);
|
||||
f, netdev, sch, data, app, cleanup);
|
||||
if (IS_ERR(block_cb)) {
|
||||
list_del(&cb_priv->list);
|
||||
kfree(cb_priv);
|
||||
|
@ -1711,7 +1711,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
|
|||
}
|
||||
|
||||
int
|
||||
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
|
||||
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
|
||||
enum tc_setup_type type, void *type_data,
|
||||
void *data,
|
||||
void (*cleanup)(struct flow_block_cb *block_cb))
|
||||
|
@ -1721,7 +1721,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
|
|||
|
||||
switch (type) {
|
||||
case TC_SETUP_BLOCK:
|
||||
return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
|
||||
return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
|
||||
type_data, data, cleanup);
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
|
|
|
@ -444,6 +444,7 @@ struct flow_block_offload {
|
|||
struct list_head cb_list;
|
||||
struct list_head *driver_block_list;
|
||||
struct netlink_ext_ack *extack;
|
||||
struct Qdisc *sch;
|
||||
};
|
||||
|
||||
enum tc_setup_type;
|
||||
|
@ -455,6 +456,7 @@ struct flow_block_cb;
|
|||
struct flow_block_indr {
|
||||
struct list_head list;
|
||||
struct net_device *dev;
|
||||
struct Qdisc *sch;
|
||||
enum flow_block_binder_type binder_type;
|
||||
void *data;
|
||||
void *cb_priv;
|
||||
|
@ -479,7 +481,8 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
|
|||
void *cb_ident, void *cb_priv,
|
||||
void (*release)(void *cb_priv),
|
||||
struct flow_block_offload *bo,
|
||||
struct net_device *dev, void *data,
|
||||
struct net_device *dev,
|
||||
struct Qdisc *sch, void *data,
|
||||
void *indr_cb_priv,
|
||||
void (*cleanup)(struct flow_block_cb *block_cb));
|
||||
void flow_block_cb_free(struct flow_block_cb *block_cb);
|
||||
|
@ -553,7 +556,7 @@ static inline void flow_block_init(struct flow_block *flow_block)
|
|||
INIT_LIST_HEAD(&flow_block->cb_list);
|
||||
}
|
||||
|
||||
typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
|
||||
typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
|
||||
enum tc_setup_type type, void *type_data,
|
||||
void *data,
|
||||
void (*cleanup)(struct flow_block_cb *block_cb));
|
||||
|
@ -561,7 +564,7 @@ typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
|
|||
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
|
||||
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
|
||||
void (*release)(void *cb_priv));
|
||||
int flow_indr_dev_setup_offload(struct net_device *dev,
|
||||
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
|
||||
enum tc_setup_type type, void *data,
|
||||
struct flow_block_offload *bo,
|
||||
void (*cleanup)(struct flow_block_cb *block_cb));
|
||||
|
|
|
@ -429,7 +429,7 @@ EXPORT_SYMBOL(flow_indr_dev_unregister);
|
|||
|
||||
static void flow_block_indr_init(struct flow_block_cb *flow_block,
|
||||
struct flow_block_offload *bo,
|
||||
struct net_device *dev, void *data,
|
||||
struct net_device *dev, struct Qdisc *sch, void *data,
|
||||
void *cb_priv,
|
||||
void (*cleanup)(struct flow_block_cb *block_cb))
|
||||
{
|
||||
|
@ -437,6 +437,7 @@ static void flow_block_indr_init(struct flow_block_cb *flow_block,
|
|||
flow_block->indr.data = data;
|
||||
flow_block->indr.cb_priv = cb_priv;
|
||||
flow_block->indr.dev = dev;
|
||||
flow_block->indr.sch = sch;
|
||||
flow_block->indr.cleanup = cleanup;
|
||||
}
|
||||
|
||||
|
@ -444,7 +445,8 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
|
|||
void *cb_ident, void *cb_priv,
|
||||
void (*release)(void *cb_priv),
|
||||
struct flow_block_offload *bo,
|
||||
struct net_device *dev, void *data,
|
||||
struct net_device *dev,
|
||||
struct Qdisc *sch, void *data,
|
||||
void *indr_cb_priv,
|
||||
void (*cleanup)(struct flow_block_cb *block_cb))
|
||||
{
|
||||
|
@ -454,7 +456,7 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
|
|||
if (IS_ERR(block_cb))
|
||||
goto out;
|
||||
|
||||
flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup);
|
||||
flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
|
||||
list_add(&block_cb->indr.list, &flow_block_indr_list);
|
||||
|
||||
out:
|
||||
|
@ -462,7 +464,7 @@ out:
|
|||
}
|
||||
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
|
||||
|
||||
int flow_indr_dev_setup_offload(struct net_device *dev,
|
||||
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
|
||||
enum tc_setup_type type, void *data,
|
||||
struct flow_block_offload *bo,
|
||||
void (*cleanup)(struct flow_block_cb *block_cb))
|
||||
|
@ -471,7 +473,7 @@ int flow_indr_dev_setup_offload(struct net_device *dev,
|
|||
|
||||
mutex_lock(&flow_indr_block_lock);
|
||||
list_for_each_entry(this, &flow_block_indr_dev_list, list)
|
||||
this->cb(dev, this->cb_priv, type, bo, data, cleanup);
|
||||
this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
|
||||
|
||||
mutex_unlock(&flow_indr_block_lock);
|
||||
|
||||
|
|
|
@ -964,7 +964,7 @@ static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo,
|
|||
nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
|
||||
extack);
|
||||
|
||||
return flow_indr_dev_setup_offload(dev, TC_SETUP_FT, flowtable, bo,
|
||||
return flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_FT, flowtable, bo,
|
||||
nf_flow_table_indr_cleanup);
|
||||
}
|
||||
|
||||
|
|
|
@ -312,7 +312,7 @@ static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
|
|||
|
||||
nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);
|
||||
|
||||
err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, basechain, &bo,
|
||||
err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
|
||||
nft_indr_block_cleanup);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
|
|
@ -622,7 +622,7 @@ static int tcf_block_setup(struct tcf_block *block,
|
|||
struct flow_block_offload *bo);
|
||||
|
||||
static void tcf_block_offload_init(struct flow_block_offload *bo,
|
||||
struct net_device *dev,
|
||||
struct net_device *dev, struct Qdisc *sch,
|
||||
enum flow_block_command command,
|
||||
enum flow_block_binder_type binder_type,
|
||||
struct flow_block *flow_block,
|
||||
|
@ -634,6 +634,7 @@ static void tcf_block_offload_init(struct flow_block_offload *bo,
|
|||
bo->block = flow_block;
|
||||
bo->block_shared = shared;
|
||||
bo->extack = extack;
|
||||
bo->sch = sch;
|
||||
INIT_LIST_HEAD(&bo->cb_list);
|
||||
}
|
||||
|
||||
|
@ -644,10 +645,11 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
|
|||
{
|
||||
struct tcf_block *block = block_cb->indr.data;
|
||||
struct net_device *dev = block_cb->indr.dev;
|
||||
struct Qdisc *sch = block_cb->indr.sch;
|
||||
struct netlink_ext_ack extack = {};
|
||||
struct flow_block_offload bo;
|
||||
|
||||
tcf_block_offload_init(&bo, dev, FLOW_BLOCK_UNBIND,
|
||||
tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
|
||||
block_cb->indr.binder_type,
|
||||
&block->flow_block, tcf_block_shared(block),
|
||||
&extack);
|
||||
|
@ -666,14 +668,14 @@ static bool tcf_block_offload_in_use(struct tcf_block *block)
|
|||
}
|
||||
|
||||
static int tcf_block_offload_cmd(struct tcf_block *block,
|
||||
struct net_device *dev,
|
||||
struct net_device *dev, struct Qdisc *sch,
|
||||
struct tcf_block_ext_info *ei,
|
||||
enum flow_block_command command,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct flow_block_offload bo = {};
|
||||
|
||||
tcf_block_offload_init(&bo, dev, command, ei->binder_type,
|
||||
tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
|
||||
&block->flow_block, tcf_block_shared(block),
|
||||
extack);
|
||||
|
||||
|
@ -690,7 +692,7 @@ static int tcf_block_offload_cmd(struct tcf_block *block,
|
|||
return tcf_block_setup(block, &bo);
|
||||
}
|
||||
|
||||
flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block, &bo,
|
||||
flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
|
||||
tc_block_indr_cleanup);
|
||||
tcf_block_setup(block, &bo);
|
||||
|
||||
|
@ -717,7 +719,7 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
|
|||
goto err_unlock;
|
||||
}
|
||||
|
||||
err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
|
||||
err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
|
||||
if (err == -EOPNOTSUPP)
|
||||
goto no_offload_dev_inc;
|
||||
if (err)
|
||||
|
@ -744,7 +746,7 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
|
|||
int err;
|
||||
|
||||
down_write(&block->cb_lock);
|
||||
err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
|
||||
err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
|
||||
if (err == -EOPNOTSUPP)
|
||||
goto no_offload_dev_dec;
|
||||
up_write(&block->cb_lock);
|
||||
|
|
Loading…
Reference in New Issue