Merge branch 'net-sched-indirect-tc-block-cb-registration'
Jakub Kicinski says:

====================
net: sched: indirect tc block cb registration

John says:

This patchset introduces an alternative to egdev offload by allowing a
driver to register for block updates when an external device (e.g. tunnel
netdev) is bound to a TC block. Drivers can track new netdevs or register
to existing ones to receive information on such events. Based on this,
they may register for block offload rules using already existing
functions.

The patchset also implements this new indirect block registration in the
NFP driver to allow the offloading of tunnel rules. The use of egdev
offload (which is currently only used for tunnel offload) is subsequently
removed.

RFC v2 -> PATCH
 - removed embedded tracking function from indir block register (now up
   to driver to clean up after itself)
 - refactored NFP code due to recent submissions
 - removed priv list clean function in NFP (list should be cleared by
   indirect block unregisters)

RFC v1 -> v2:
 - free allocated owner struct in block_owner_clean function
 - add geneve type helper function
 - move test stub in NFP (v1 patch 2) to full tunnel offload
   implementation via indirect blocks (v2 patches 3-8)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
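As a usage illustration (not part of the commit): a minimal sketch of how a driver might consume the indirect block API added below. The tc_indr_block_cb_register()/tcf_block_cb_register() calls and the tc_indr_block_bind_cb_t signature are the ones introduced or used by this series; the my_drv_* names are hypothetical, and the NFP changes in the diff are the real in-tree user.

#include <linux/netdevice.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/* Per-block callback: receives flower rules for the foreign netdev. */
static int my_drv_setup_tc_block_cb(enum tc_setup_type type,
				    void *type_data, void *cb_priv)
{
	/* Translate and offload the rule here; cb_priv identifies the dev. */
	return -EOPNOTSUPP;
}

/* Indirect callback: invoked when a TC block is bound to / unbound from a
 * netdev (e.g. a tunnel device) this driver watches but does not own.
 */
static int my_drv_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
				   enum tc_setup_type type, void *type_data)
{
	struct tc_block_offload *f = type_data;

	if (type != TC_SETUP_BLOCK)
		return -EOPNOTSUPP;
	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		/* Rule registration reuses the existing block cb API. */
		return tcf_block_cb_register(f->block,
					     my_drv_setup_tc_block_cb,
					     netdev, cb_priv, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, my_drv_setup_tc_block_cb,
					netdev);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Typically called from the driver's netdevice notifier on NETDEV_REGISTER. */
static int my_drv_watch_netdev(struct net_device *netdev, void *drv_priv)
{
	return tc_indr_block_cb_register(netdev, drv_priv,
					 my_drv_indr_setup_tc_cb, netdev);
}

Note that, per the RFC v2 changelog, the core deliberately does not track registrations on the driver's behalf: the driver must call the matching unregister itself (e.g. on NETDEV_UNREGISTER), as nfp_flower_reg_indir_block_handler() does below.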
This commit is contained in:
commit bb5e6a8290
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -2,7 +2,6 @@
 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
 
 #include <linux/bitfield.h>
-#include <net/geneve.h>
 #include <net/pkt_cls.h>
 #include <net/switchdev.h>
 #include <net/tc_act/tc_csum.h>
@@ -11,7 +10,6 @@
 #include <net/tc_act/tc_pedit.h>
 #include <net/tc_act/tc_vlan.h>
 #include <net/tc_act/tc_tunnel_key.h>
-#include <net/vxlan.h>
 
 #include "cmsg.h"
 #include "main.h"
@@ -92,18 +90,6 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
 	return act_size;
 }
 
-static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
-					 enum nfp_flower_tun_type tun_type)
-{
-	if (netif_is_vxlan(out_dev))
-		return tun_type == NFP_FL_TUNNEL_VXLAN;
-
-	if (netif_is_geneve(out_dev))
-		return tun_type == NFP_FL_TUNNEL_GENEVE;
-
-	return false;
-}
-
 static int
 nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
 	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
@@ -149,11 +135,12 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
 	/* Set action output parameters. */
 	output->flags = cpu_to_be16(tmp_flags);
 
-	/* Only offload if egress ports are on the same device as the
-	 * ingress port.
-	 */
-	if (!switchdev_port_same_parent_id(in_dev, out_dev))
-		return -EOPNOTSUPP;
+	if (nfp_netdev_is_nfp_repr(in_dev)) {
+		/* Confirm ingress and egress are on same device. */
+		if (!switchdev_port_same_parent_id(in_dev, out_dev))
+			return -EOPNOTSUPP;
+	}
+
 	if (!nfp_netdev_is_nfp_repr(out_dev))
 		return -EOPNOTSUPP;
 
@@ -840,9 +827,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
 		*a_len += sizeof(struct nfp_fl_push_vlan);
 	} else if (is_tcf_tunnel_set(a)) {
 		struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
-		struct nfp_repr *repr = netdev_priv(netdev);
 
-		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+		*tun_type = nfp_fl_get_tun_from_act_l4_port(app, a);
 		if (*tun_type == NFP_FL_TUNNEL_NONE)
 			return -EOPNOTSUPP;
 
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -8,6 +8,7 @@
 #include <linux/skbuff.h>
 #include <linux/types.h>
 #include <net/geneve.h>
+#include <net/vxlan.h>
 
 #include "../nfp_app.h"
 #include "../nfpcore/nfp_cpp.h"
@@ -499,6 +500,32 @@ static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
 	return skb->len - NFP_FLOWER_CMSG_HLEN;
 }
 
+static inline bool
+nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
+			     enum nfp_flower_tun_type tun_type)
+{
+	if (netif_is_vxlan(netdev))
+		return tun_type == NFP_FL_TUNNEL_VXLAN;
+	if (netif_is_geneve(netdev))
+		return tun_type == NFP_FL_TUNNEL_GENEVE;
+
+	return false;
+}
+
+static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
+{
+	if (!netdev->rtnl_link_ops)
+		return false;
+	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+		return true;
+	if (netif_is_vxlan(netdev))
+		return true;
+	if (netif_is_geneve(netdev))
+		return true;
+
+	return false;
+}
+
 struct sk_buff *
 nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports);
 void
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -146,23 +146,12 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
 	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
 }
 
-static int
-nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
-{
-	return tc_setup_cb_egdev_register(netdev,
-					  nfp_flower_setup_tc_egress_cb,
-					  netdev_priv(netdev));
-}
-
 static void
 nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
 {
 	struct nfp_repr *repr = netdev_priv(netdev);
 
 	kfree(repr->app_priv);
-
-	tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
-				     netdev_priv(netdev));
 }
 
 static void
@@ -568,6 +557,8 @@ static int nfp_flower_init(struct nfp_app *app)
 		goto err_cleanup_metadata;
 	}
 
+	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
+
 	return 0;
 
 err_cleanup_metadata:
@@ -684,6 +675,10 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
 		return ret;
 	}
 
+	ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
+	if (ret & NOTIFY_STOP_MASK)
+		return ret;
+
 	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
 }
 
@@ -705,7 +700,6 @@ const struct nfp_app_type app_flower = {
 	.vnic_init	= nfp_flower_vnic_init,
 	.vnic_clean	= nfp_flower_vnic_clean,
 
-	.repr_init	= nfp_flower_repr_netdev_init,
 	.repr_preclean	= nfp_flower_repr_netdev_preclean,
 	.repr_clean	= nfp_flower_repr_netdev_clean,
 
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -20,7 +20,6 @@ struct nfp_fl_pre_lag;
 struct net_device;
 struct nfp_app;
 
-#define NFP_FL_STATS_CTX_DONT_CARE	cpu_to_be32(0xffffffff)
 #define NFP_FL_STATS_ELEM_RS		FIELD_SIZEOF(struct nfp_fl_stats_id, \
 						init_unalloc)
 #define NFP_FLOWER_MASK_ENTRY_RS	256
@@ -130,6 +129,7 @@ struct nfp_fl_lag {
  * @reify_wait_queue:	wait queue for repr reify response counting
  * @mtu_conf:		Configuration of repr MTU value
  * @nfp_lag:		Link aggregation data block
+ * @indr_block_cb_priv:	List of priv data passed to indirect block cbs
  */
 struct nfp_flower_priv {
 	struct nfp_app *app;
@@ -162,6 +162,7 @@ struct nfp_flower_priv {
 	wait_queue_head_t reify_wait_queue;
 	struct nfp_mtu_conf mtu_conf;
 	struct nfp_fl_lag nfp_lag;
+	struct list_head indr_block_cb_priv;
 };
 
 /**
@@ -205,7 +206,6 @@ struct nfp_fl_payload {
 	char *unmasked_data;
 	char *mask_data;
 	char *action_data;
-	bool ingress_offload;
 };
 
 extern const struct rhashtable_params nfp_flower_table_params;
@@ -222,7 +222,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app);
 
 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
 			enum tc_setup_type type, void *type_data);
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+				  struct tc_cls_flower_offload *flow,
 				  struct nfp_fl_key_ls *key_ls,
 				  struct net_device *netdev,
 				  struct nfp_fl_payload *nfp_flow,
@@ -240,7 +241,7 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
 
 struct nfp_fl_payload *
 nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
-			   struct net_device *netdev, __be32 host_ctx);
+			   struct net_device *netdev);
 struct nfp_fl_payload *
 nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
 
@@ -256,8 +257,6 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
 void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
 void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
 void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
-int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
-				  void *cb_priv);
 void nfp_flower_lag_init(struct nfp_fl_lag *lag);
 void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
 int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
@@ -270,5 +269,8 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
 					struct nfp_fl_pre_lag *pre_act);
 int nfp_flower_lag_get_output_id(struct nfp_app *app,
 				 struct net_device *master);
+int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
+				       struct net_device *netdev,
+				       unsigned long event);
 
 #endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -52,10 +52,13 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
 		return 0;
 	}
 
-	if (tun_type)
+	if (tun_type) {
 		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
-	else
+	} else {
+		if (!cmsg_port)
+			return -EOPNOTSUPP;
 		frame->in_port = cpu_to_be32(cmsg_port);
+	}
 
 	return 0;
 }
@@ -289,17 +292,21 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
 	}
 }
 
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+				  struct tc_cls_flower_offload *flow,
 				  struct nfp_fl_key_ls *key_ls,
 				  struct net_device *netdev,
 				  struct nfp_fl_payload *nfp_flow,
 				  enum nfp_flower_tun_type tun_type)
 {
-	struct nfp_repr *netdev_repr;
+	u32 cmsg_port = 0;
 	int err;
 	u8 *ext;
 	u8 *msk;
 
+	if (nfp_netdev_is_nfp_repr(netdev))
+		cmsg_port = nfp_repr_get_port_id(netdev);
+
 	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
 	memset(nfp_flow->mask_data, 0, key_ls->key_size);
 
@@ -327,15 +334,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
 
 	/* Populate Exact Port data. */
 	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
-				      nfp_repr_get_port_id(netdev),
-				      false, tun_type);
+				      cmsg_port, false, tun_type);
 	if (err)
 		return err;
 
 	/* Populate Mask Port Data. */
 	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
-				      nfp_repr_get_port_id(netdev),
-				      true, tun_type);
+				      cmsg_port, true, tun_type);
 	if (err)
 		return err;
 
@@ -399,16 +404,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
 		msk += sizeof(struct nfp_flower_ipv4_udp_tun);
 
 		/* Configure tunnel end point MAC. */
-		if (nfp_netdev_is_nfp_repr(netdev)) {
-			netdev_repr = netdev_priv(netdev);
-			nfp_tunnel_write_macs(netdev_repr->app);
+		nfp_tunnel_write_macs(app);
 
-			/* Store the tunnel destination in the rule data.
-			 * This must be present and be an exact match.
-			 */
-			nfp_flow->nfp_tun_ipv4_addr = tun_dst;
-			nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
-		}
+		/* Store the tunnel destination in the rule data.
+		 * This must be present and be an exact match.
+		 */
+		nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+		nfp_tunnel_add_ipv4_off(app, tun_dst);
 
 		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
 			err = nfp_flower_compile_geneve_opt(ext, flow, false);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -21,7 +21,6 @@ struct nfp_mask_id_table {
 struct nfp_fl_flow_table_cmp_arg {
 	struct net_device *netdev;
 	unsigned long cookie;
-	__be32 host_ctx;
 };
 
 static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
@@ -76,14 +75,13 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
 /* Must be called with either RTNL or rcu_read_lock */
 struct nfp_fl_payload *
 nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
-			   struct net_device *netdev, __be32 host_ctx)
+			   struct net_device *netdev)
 {
 	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
 	struct nfp_flower_priv *priv = app->priv;
 
 	flower_cmp_arg.netdev = netdev;
 	flower_cmp_arg.cookie = tc_flower_cookie;
-	flower_cmp_arg.host_ctx = host_ctx;
 
 	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
 				      nfp_flower_table_params);
@@ -287,6 +285,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 
 	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
 	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
+	nfp_flow->ingress_dev = netdev;
 
 	new_mask_id = 0;
 	if (!nfp_check_mask_add(app, nfp_flow->mask_data,
@@ -306,8 +305,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 	priv->stats[stats_cxt].bytes = 0;
 	priv->stats[stats_cxt].used = jiffies;
 
-	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
-						 NFP_FL_STATS_CTX_DONT_CARE);
+	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
 	if (check_entry) {
 		if (nfp_release_stats_entry(app, stats_cxt))
 			return -EINVAL;
@@ -352,9 +350,7 @@ static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
 	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
 	const struct nfp_fl_payload *flow_entry = obj;
 
-	if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) &&
-	    (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
-	     flow_entry->meta.host_ctx_id == cmp_arg->host_ctx))
+	if (flow_entry->ingress_dev == cmp_arg->netdev)
 		return flow_entry->tc_flower_cookie != cmp_arg->cookie;
 
 	return 1;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -56,11 +56,10 @@
 	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
 
 static int
-nfp_flower_xmit_flow(struct net_device *netdev,
-		     struct nfp_fl_payload *nfp_flow, u8 mtype)
+nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
+		     u8 mtype)
 {
 	u32 meta_len, key_len, mask_len, act_len, tot_len;
-	struct nfp_repr *priv = netdev_priv(netdev);
 	struct sk_buff *skb;
 	unsigned char *msg;
 
@@ -78,7 +77,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
 	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
 	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
 
-	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
+	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 
@@ -96,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
 	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
 	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
 
-	nfp_ctrl_tx(priv->app->ctrl, skb);
+	nfp_ctrl_tx(app->ctrl, skb);
 
 	return 0;
 }
@@ -129,9 +128,9 @@ nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
 
 static int
 nfp_flower_calculate_key_layers(struct nfp_app *app,
+				struct net_device *netdev,
 				struct nfp_fl_key_ls *ret_key_ls,
 				struct tc_cls_flower_offload *flow,
-				bool egress,
 				enum nfp_flower_tun_type *tun_type)
 {
 	struct flow_dissector_key_basic *mask_basic = NULL;
@@ -187,8 +186,6 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 			skb_flow_dissector_target(flow->dissector,
 						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
 						  flow->key);
-		if (!egress)
-			return -EOPNOTSUPP;
 
 		if (mask_enc_ctl->addr_type != 0xffff ||
 		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
@@ -251,9 +248,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 		default:
 			return -EOPNOTSUPP;
 		}
-	} else if (egress) {
-		/* Reject non tunnel matches offloaded to egress repr. */
-		return -EOPNOTSUPP;
+
+		/* Ensure the ingress netdev matches the expected tun type. */
+		if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
+			return -EOPNOTSUPP;
 	}
 
 	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -374,7 +372,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 }
 
 static struct nfp_fl_payload *
-nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
 {
 	struct nfp_fl_payload *flow_pay;
 
@@ -398,7 +396,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
 
 	flow_pay->nfp_tun_ipv4_addr = 0;
 	flow_pay->meta.flags = 0;
-	flow_pay->ingress_offload = !egress;
 
 	return flow_pay;
 
@@ -416,7 +413,6 @@ err_free_flow:
  * @app:	Pointer to the APP handle
  * @netdev:	netdev structure.
  * @flow:	TC flower classifier offload structure.
- * @egress:	NFP netdev is the egress.
  *
  * Adds a new flow to the repeated hash structure and action payload.
  *
@@ -424,46 +420,35 @@ err_free_flow:
  */
 static int
 nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
-		       struct tc_cls_flower_offload *flow, bool egress)
+		       struct tc_cls_flower_offload *flow)
 {
 	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
-	struct nfp_port *port = nfp_port_from_netdev(netdev);
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *flow_pay;
 	struct nfp_fl_key_ls *key_layer;
-	struct net_device *ingr_dev;
+	struct nfp_port *port = NULL;
 	int err;
 
-	ingr_dev = egress ? NULL : netdev;
-	flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
-					      NFP_FL_STATS_CTX_DONT_CARE);
-	if (flow_pay) {
-		/* Ignore as duplicate if it has been added by different cb. */
-		if (flow_pay->ingress_offload && egress)
-			return 0;
-		else
-			return -EOPNOTSUPP;
-	}
+	if (nfp_netdev_is_nfp_repr(netdev))
+		port = nfp_port_from_netdev(netdev);
 
 	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
 	if (!key_layer)
 		return -ENOMEM;
 
-	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
 					      &tun_type);
 	if (err)
 		goto err_free_key_ls;
 
-	flow_pay = nfp_flower_allocate_new(key_layer, egress);
+	flow_pay = nfp_flower_allocate_new(key_layer);
 	if (!flow_pay) {
 		err = -ENOMEM;
 		goto err_free_key_ls;
 	}
 
-	flow_pay->ingress_dev = egress ? NULL : netdev;
-
-	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
-					    tun_type);
+	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
+					    flow_pay, tun_type);
 	if (err)
 		goto err_destroy_flow;
 
@@ -471,12 +456,11 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 	if (err)
 		goto err_destroy_flow;
 
-	err = nfp_compile_flow_metadata(app, flow, flow_pay,
-					flow_pay->ingress_dev);
+	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
 	if (err)
 		goto err_destroy_flow;
 
-	err = nfp_flower_xmit_flow(netdev, flow_pay,
+	err = nfp_flower_xmit_flow(app, flow_pay,
 				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
 	if (err)
 		goto err_destroy_flow;
@@ -487,7 +471,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 	if (err)
 		goto err_destroy_flow;
 
-	port->tc_offload_cnt++;
+	if (port)
+		port->tc_offload_cnt++;
 
 	/* Deallocate flow payload when flower rule has been destroyed. */
 	kfree(key_layer);
 
@@ -509,7 +494,6 @@ err_free_key_ls:
  * @app:	Pointer to the APP handle
  * @netdev:	netdev structure.
  * @flow:	TC flower classifier offload structure
- * @egress:	Netdev is the egress dev.
  *
  * Removes a flow from the repeated hash structure and clears the
  * action payload.
@@ -518,19 +502,19 @@ err_free_key_ls:
  */
 static int
 nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
-		       struct tc_cls_flower_offload *flow, bool egress)
+		       struct tc_cls_flower_offload *flow)
 {
-	struct nfp_port *port = nfp_port_from_netdev(netdev);
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *nfp_flow;
-	struct net_device *ingr_dev;
+	struct nfp_port *port = NULL;
 	int err;
 
-	ingr_dev = egress ? NULL : netdev;
-	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
-					      NFP_FL_STATS_CTX_DONT_CARE);
+	if (nfp_netdev_is_nfp_repr(netdev))
+		port = nfp_port_from_netdev(netdev);
+
+	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
 	if (!nfp_flow)
-		return egress ? 0 : -ENOENT;
+		return -ENOENT;
 
 	err = nfp_modify_flow_metadata(app, nfp_flow);
 	if (err)
@@ -539,13 +523,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
 	if (nfp_flow->nfp_tun_ipv4_addr)
 		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
 
-	err = nfp_flower_xmit_flow(netdev, nfp_flow,
+	err = nfp_flower_xmit_flow(app, nfp_flow,
 				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
 	if (err)
 		goto err_free_flow;
 
 err_free_flow:
-	port->tc_offload_cnt--;
+	if (port)
+		port->tc_offload_cnt--;
 	kfree(nfp_flow->action_data);
 	kfree(nfp_flow->mask_data);
 	kfree(nfp_flow->unmasked_data);
@@ -561,7 +546,6 @@ err_free_flow:
  * @app:	Pointer to the APP handle
  * @netdev:	Netdev structure.
  * @flow:	TC flower classifier offload structure
- * @egress:	Netdev is the egress dev.
  *
  * Populates a flow statistics structure which which corresponds to a
 * specific flow.
@@ -570,22 +554,16 @@ err_free_flow:
  */
 static int
 nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
-		     struct tc_cls_flower_offload *flow, bool egress)
+		     struct tc_cls_flower_offload *flow)
 {
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *nfp_flow;
-	struct net_device *ingr_dev;
 	u32 ctx_id;
 
-	ingr_dev = egress ? NULL : netdev;
-	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
-					      NFP_FL_STATS_CTX_DONT_CARE);
+	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
 	if (!nfp_flow)
 		return -EINVAL;
 
-	if (nfp_flow->ingress_offload && egress)
-		return 0;
-
 	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
 
 	spin_lock_bh(&priv->stats_lock);
@@ -602,35 +580,18 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
 
 static int
 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
-			struct tc_cls_flower_offload *flower, bool egress)
+			struct tc_cls_flower_offload *flower)
 {
 	if (!eth_proto_is_802_3(flower->common.protocol))
 		return -EOPNOTSUPP;
 
 	switch (flower->command) {
 	case TC_CLSFLOWER_REPLACE:
-		return nfp_flower_add_offload(app, netdev, flower, egress);
+		return nfp_flower_add_offload(app, netdev, flower);
 	case TC_CLSFLOWER_DESTROY:
-		return nfp_flower_del_offload(app, netdev, flower, egress);
+		return nfp_flower_del_offload(app, netdev, flower);
 	case TC_CLSFLOWER_STATS:
-		return nfp_flower_get_stats(app, netdev, flower, egress);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
-				  void *cb_priv)
-{
-	struct nfp_repr *repr = cb_priv;
-
-	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
-		return -EOPNOTSUPP;
-
-	switch (type) {
-	case TC_SETUP_CLSFLOWER:
-		return nfp_flower_repr_offload(repr->app, repr->netdev,
-					       type_data, true);
+		return nfp_flower_get_stats(app, netdev, flower);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -647,7 +608,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
 	switch (type) {
 	case TC_SETUP_CLSFLOWER:
 		return nfp_flower_repr_offload(repr->app, repr->netdev,
-					       type_data, false);
+					       type_data);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -686,3 +647,129 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
 		return -EOPNOTSUPP;
 	}
 }
+
+struct nfp_flower_indr_block_cb_priv {
+	struct net_device *netdev;
+	struct nfp_app *app;
+	struct list_head list;
+};
+
+static struct nfp_flower_indr_block_cb_priv *
+nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
+				     struct net_device *netdev)
+{
+	struct nfp_flower_indr_block_cb_priv *cb_priv;
+	struct nfp_flower_priv *priv = app->priv;
+
+	/* All callback list access should be protected by RTNL. */
+	ASSERT_RTNL();
+
+	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
+		if (cb_priv->netdev == netdev)
+			return cb_priv;
+
+	return NULL;
+}
+
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+					  void *type_data, void *cb_priv)
+{
+	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
+	struct tc_cls_flower_offload *flower = type_data;
+
+	if (flower->common.chain_index)
+		return -EOPNOTSUPP;
+
+	switch (type) {
+	case TC_SETUP_CLSFLOWER:
+		return nfp_flower_repr_offload(priv->app, priv->netdev,
+					       type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
+			       struct tc_block_offload *f)
+{
+	struct nfp_flower_indr_block_cb_priv *cb_priv;
+	struct nfp_flower_priv *priv = app->priv;
+	int err;
+
+	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	switch (f->command) {
+	case TC_BLOCK_BIND:
+		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
+		if (!cb_priv)
+			return -ENOMEM;
+
+		cb_priv->netdev = netdev;
+		cb_priv->app = app;
+		list_add(&cb_priv->list, &priv->indr_block_cb_priv);
+
+		err = tcf_block_cb_register(f->block,
+					    nfp_flower_setup_indr_block_cb,
+					    netdev, cb_priv, f->extack);
+		if (err) {
+			list_del(&cb_priv->list);
+			kfree(cb_priv);
+		}
+
+		return err;
+	case TC_BLOCK_UNBIND:
+		tcf_block_cb_unregister(f->block,
+					nfp_flower_setup_indr_block_cb, netdev);
+		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+		if (cb_priv) {
+			list_del(&cb_priv->list);
+			kfree(cb_priv);
+		}
+
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int
+nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+			    enum tc_setup_type type, void *type_data)
+{
+	switch (type) {
+	case TC_SETUP_BLOCK:
+		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
+						      type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
+				       struct net_device *netdev,
+				       unsigned long event)
+{
+	int err;
+
+	if (!nfp_fl_is_netdev_to_offload(netdev))
+		return NOTIFY_OK;
+
+	if (event == NETDEV_REGISTER) {
+		err = __tc_indr_block_cb_register(netdev, app,
+						  nfp_flower_indr_setup_tc_cb,
+						  netdev);
+		if (err)
+			nfp_flower_cmsg_warn(app,
+					     "Indirect block reg failed - %s\n",
+					     netdev->name);
+	} else if (event == NETDEV_UNREGISTER) {
+		__tc_indr_block_cb_unregister(netdev,
+					      nfp_flower_indr_setup_tc_cb,
+					      netdev);
+	}
+
+	return NOTIFY_OK;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -4,7 +4,6 @@
 #include <linux/etherdevice.h>
 #include <linux/inetdevice.h>
 #include <net/netevent.h>
-#include <net/vxlan.h>
 #include <linux/idr.h>
 #include <net/dst_metadata.h>
 #include <net/arp.h>
@@ -182,20 +181,6 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
 	}
 }
 
-static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
-{
-	if (!netdev->rtnl_link_ops)
-		return false;
-	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
-		return true;
-	if (netif_is_vxlan(netdev))
-		return true;
-	if (netif_is_geneve(netdev))
-		return true;
-
-	return false;
-}
-
 static int
 nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
 			 gfp_t flag)
@@ -617,7 +602,7 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
 
 	if (nfp_netdev_is_nfp_repr(netdev))
 		port = nfp_repr_get_port_id(netdev);
-	else if (!nfp_tun_is_netdev_to_offload(netdev))
+	else if (!nfp_fl_is_netdev_to_offload(netdev))
 		return;
 
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -660,7 +645,7 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
 {
 	if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
 		/* If non-nfp netdev then free its offload index. */
-		if (nfp_tun_is_netdev_to_offload(netdev))
+		if (nfp_fl_is_netdev_to_offload(netdev))
 			nfp_tun_del_mac_idx(app, netdev->ifindex);
 	} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
 		   event == NETDEV_REGISTER) {
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
@@ -81,6 +81,14 @@ void __tcf_block_cb_unregister(struct tcf_block *block,
 			       struct tcf_block_cb *block_cb);
 void tcf_block_cb_unregister(struct tcf_block *block,
 			     tc_setup_cb_t *cb, void *cb_ident);
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+				tc_indr_block_bind_cb_t *cb, void *cb_ident);
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
+void tc_indr_block_cb_unregister(struct net_device *dev,
+				 tc_indr_block_bind_cb_t *cb, void *cb_ident);
 
 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 		 struct tcf_result *res, bool compat_mode);
@@ -183,6 +191,32 @@ void tcf_block_cb_unregister(struct tcf_block *block,
 {
 }
 
+static inline
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+				tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	return 0;
+}
+
+static inline
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	return 0;
+}
+
+static inline
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+}
+
+static inline
+void tc_indr_block_cb_unregister(struct net_device *dev,
+				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+}
+
 static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 			       struct tcf_result *res, bool compat_mode)
 {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
@@ -24,6 +24,9 @@ struct bpf_flow_keys;
 typedef int tc_setup_cb_t(enum tc_setup_type type,
 			  void *type_data, void *cb_priv);
 
+typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
+				    enum tc_setup_type type, void *type_data);
+
 struct qdisc_rate_table {
 	struct tc_ratespec rate;
 	u32 data[256];
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
@@ -25,6 +25,7 @@
 #include <linux/kmod.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
+#include <linux/rhashtable.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/netlink.h>
@@ -365,6 +366,245 @@ static void tcf_chain_flush(struct tcf_chain *chain)
 	}
 }
 
+static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
+{
+	const struct Qdisc_class_ops *cops;
+	struct Qdisc *qdisc;
+
+	if (!dev_ingress_queue(dev))
+		return NULL;
+
+	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
+	if (!qdisc)
+		return NULL;
+
+	cops = qdisc->ops->cl_ops;
+	if (!cops)
+		return NULL;
+
+	if (!cops->tcf_block)
+		return NULL;
+
+	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
+}
+
+static struct rhashtable indr_setup_block_ht;
+
+struct tc_indr_block_dev {
+	struct rhash_head ht_node;
+	struct net_device *dev;
+	unsigned int refcnt;
+	struct list_head cb_list;
+	struct tcf_block *block;
+};
+
+struct tc_indr_block_cb {
+	struct list_head list;
+	void *cb_priv;
+	tc_indr_block_bind_cb_t *cb;
+	void *cb_ident;
+};
+
+static const struct rhashtable_params tc_indr_setup_block_ht_params = {
+	.key_offset	= offsetof(struct tc_indr_block_dev, dev),
+	.head_offset	= offsetof(struct tc_indr_block_dev, ht_node),
+	.key_len	= sizeof(struct net_device *),
+};
+
+static struct tc_indr_block_dev *
+tc_indr_block_dev_lookup(struct net_device *dev)
+{
+	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
+				      tc_indr_setup_block_ht_params);
+}
+
+static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
+{
+	struct tc_indr_block_dev *indr_dev;
+
+	indr_dev = tc_indr_block_dev_lookup(dev);
+	if (indr_dev)
+		goto inc_ref;
+
+	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
+	if (!indr_dev)
+		return NULL;
+
+	INIT_LIST_HEAD(&indr_dev->cb_list);
+	indr_dev->dev = dev;
+	indr_dev->block = tc_dev_ingress_block(dev);
+	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
+				   tc_indr_setup_block_ht_params)) {
+		kfree(indr_dev);
+		return NULL;
+	}
+
+inc_ref:
+	indr_dev->refcnt++;
+	return indr_dev;
+}
+
+static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
+{
+	if (--indr_dev->refcnt)
+		return;
+
+	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
+			       tc_indr_setup_block_ht_params);
+	kfree(indr_dev);
+}
+
+static struct tc_indr_block_cb *
+tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
+			tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+
+	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
+		if (indr_block_cb->cb == cb &&
+		    indr_block_cb->cb_ident == cb_ident)
+			return indr_block_cb;
+	return NULL;
+}
+
+static struct tc_indr_block_cb *
+tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
+		     tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+
+	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
+	if (indr_block_cb)
+		return ERR_PTR(-EEXIST);
+
+	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
+	if (!indr_block_cb)
+		return ERR_PTR(-ENOMEM);
+
+	indr_block_cb->cb_priv = cb_priv;
+	indr_block_cb->cb = cb;
+	indr_block_cb->cb_ident = cb_ident;
+	list_add(&indr_block_cb->list, &indr_dev->cb_list);
+
+	return indr_block_cb;
+}
+
+static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
+{
+	list_del(&indr_block_cb->list);
+	kfree(indr_block_cb);
+}
+
+static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
+				  struct tc_indr_block_cb *indr_block_cb,
+				  enum tc_block_command command)
+{
+	struct tc_block_offload bo = {
+		.command	= command,
+		.binder_type	= TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
+		.block		= indr_dev->block,
+	};
+
+	if (!indr_dev->block)
+		return;
+
+	indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
+			  &bo);
+}
+
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+				tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+	struct tc_indr_block_dev *indr_dev;
+	int err;
+
+	indr_dev = tc_indr_block_dev_get(dev);
+	if (!indr_dev)
+		return -ENOMEM;
+
+	indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
+	err = PTR_ERR_OR_ZERO(indr_block_cb);
+	if (err)
+		goto err_dev_put;
+
+	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND);
+	return 0;
+
+err_dev_put:
+	tc_indr_block_dev_put(indr_dev);
+	return err;
+}
+EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);
+
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	int err;
+
+	rtnl_lock();
+	err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
+	rtnl_unlock();
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);
+
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+	struct tc_indr_block_dev *indr_dev;
+
+	indr_dev = tc_indr_block_dev_lookup(dev);
+	if (!indr_dev)
+		return;
+
+	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
+	if (!indr_block_cb)
+		return;
+
+	/* Send unbind message if required to free any block cbs. */
+	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_UNBIND);
+	tc_indr_block_cb_del(indr_block_cb);
+	tc_indr_block_dev_put(indr_dev);
+}
+EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);
+
+void tc_indr_block_cb_unregister(struct net_device *dev,
+				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+	rtnl_lock();
+	__tc_indr_block_cb_unregister(dev, cb, cb_ident);
+	rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);
+
+static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
+			       struct tcf_block_ext_info *ei,
+			       enum tc_block_command command,
+			       struct netlink_ext_ack *extack)
+{
+	struct tc_indr_block_cb *indr_block_cb;
+	struct tc_indr_block_dev *indr_dev;
+	struct tc_block_offload bo = {
+		.command	= command,
+		.binder_type	= ei->binder_type,
+		.block		= block,
+		.extack		= extack,
+	};
+
+	indr_dev = tc_indr_block_dev_lookup(dev);
+	if (!indr_dev)
+		return;
+
+	indr_dev->block = command == TC_BLOCK_BIND ? block : NULL;
+
+	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
+		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
+				  &bo);
+}
+
 static bool tcf_block_offload_in_use(struct tcf_block *block)
 {
 	return block->offloadcnt;
@@ -406,12 +646,17 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
 	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
 	if (err == -EOPNOTSUPP)
 		goto no_offload_dev_inc;
-	return err;
+	if (err)
+		return err;
+
+	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
+	return 0;
 
 no_offload_dev_inc:
 	if (tcf_block_offload_in_use(block))
 		return -EOPNOTSUPP;
 	block->nooffloaddevcnt++;
+	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
 	return 0;
 }
 
@@ -421,6 +666,8 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
 	struct net_device *dev = q->dev_queue->dev;
 	int err;
 
+	tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL);
+
 	if (!dev->netdev_ops->ndo_setup_tc)
 		goto no_offload_dev_dec;
 	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
@@ -2355,6 +2602,11 @@ static int __init tc_filter_init(void)
 	if (err)
 		goto err_register_pernet_subsys;
 
+	err = rhashtable_init(&indr_setup_block_ht,
+			      &tc_indr_setup_block_ht_params);
+	if (err)
+		goto err_rhash_setup_block_ht;
+
 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
@@ -2366,6 +2618,8 @@ static int __init tc_filter_init(void)
 
 	return 0;
 
+err_rhash_setup_block_ht:
+	unregister_pernet_subsys(&tcf_net_ops);
 err_register_pernet_subsys:
 	destroy_workqueue(tc_filter_wq);
 	return err;