nfp: flower: add qos offload stats request and reply

Add a stats request function that sends a stats request message to
hardware for a specific police filter. Process the stats reply from
hardware and update the stored qos structure.
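For illustration only, a minimal userspace sketch of decoding such a reply follows; the field layout mirrors the nfp_police_stats_reply structure added below, but the police_stats_reply name, the sample values and the summing into one pair of counters are simply meant to mimic what nfp_flower_stats_rlim_reply() does with the message.

/* Illustrative userspace decode of a police stats reply; the layout
 * mirrors struct nfp_police_stats_reply below, the values are made up.
 */
#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct police_stats_reply {
	uint32_t flags_opts;		/* struct nfp_police_cfg_head */
	uint32_t port;
	uint64_t pass_bytes;		/* counters are big-endian on the wire */
	uint64_t pass_pkts;
	uint64_t drop_bytes;
	uint64_t drop_pkts;
};

int main(void)
{
	struct police_stats_reply msg = {
		.port       = htobe32(32),
		.pass_bytes = htobe64(1500),
		.pass_pkts  = htobe64(10),
		.drop_bytes = htobe64(300),
		.drop_pkts  = htobe64(2),
	};

	/* As in the reply handler: passed and dropped traffic are summed
	 * into one pair of "current" counters for the port.
	 */
	uint64_t pkts = be64toh(msg.pass_pkts) + be64toh(msg.drop_pkts);
	uint64_t bytes = be64toh(msg.pass_bytes) + be64toh(msg.drop_bytes);

	printf("port %" PRIu32 ": %" PRIu64 " pkts, %" PRIu64 " bytes\n",
	       be32toh(msg.port), pkts, bytes);
	return 0;
}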

Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Pieter Jansen van Vuuren
Date:   2019-05-04 04:46:28 -07:00
Committer: David S. Miller
Parent: 49cbef1388
Commit: 5fb5c395e2
5 files changed, 210 insertions(+), 0 deletions(-)


@@ -278,6 +278,9 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
 		nfp_tunnel_keep_alive(app, skb);
 		break;
+	case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
+		nfp_flower_stats_rlim_reply(app, skb);
+		break;
 	case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
 		if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
 			skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);


@@ -418,6 +418,7 @@ enum nfp_flower_cmsg_type_port {
 	NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,
 	NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18,
 	NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
+	NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
 	NFP_FLOWER_CMSG_TYPE_MAX = 32,
 };


@@ -776,6 +776,9 @@ static int nfp_flower_init(struct nfp_app *app)
 		nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
 	}
 
+	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
+		nfp_flower_qos_init(app);
+
 	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
 	INIT_LIST_HEAD(&app_priv->non_repr_priv);
@@ -799,6 +802,9 @@ static void nfp_flower_clean(struct nfp_app *app)
 	skb_queue_purge(&app_priv->cmsg_skbs_low);
 	flush_work(&app_priv->cmsg_work);
 
+	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
+		nfp_flower_qos_cleanup(app);
+
 	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
 		nfp_flower_lag_cleanup(&app_priv->nfp_lag);


@@ -5,6 +5,7 @@
 #define __NFP_FLOWER_H__ 1
 
 #include "cmsg.h"
+#include "../nfp_net.h"
 
 #include <linux/circ_buf.h>
 #include <linux/hashtable.h>
@@ -158,6 +159,9 @@ struct nfp_fl_internal_ports {
  * @active_mem_unit: Current active memory unit for flower rules
  * @total_mem_units: Total number of available memory units for flower rules
  * @internal_ports: Internal port ids used in offloaded rules
+ * @qos_stats_work: Workqueue for qos stats processing
+ * @qos_rate_limiters: Current active qos rate limiters
+ * @qos_stats_lock: Lock on qos stats updates
  */
 struct nfp_flower_priv {
 	struct nfp_app *app;
@@ -186,14 +190,23 @@ struct nfp_flower_priv {
 	unsigned int active_mem_unit;
 	unsigned int total_mem_units;
 	struct nfp_fl_internal_ports internal_ports;
+	struct delayed_work qos_stats_work;
+	unsigned int qos_rate_limiters;
+	spinlock_t qos_stats_lock; /* Protect the qos stats */
 };
 
 /**
  * struct nfp_fl_qos - Flower APP priv data for quality of service
  * @netdev_port_id: NFP port number of repr with qos info
+ * @curr_stats: Currently stored stats updates for qos info
+ * @prev_stats: Previously stored updates for qos info
+ * @last_update: Stored time when last stats were updated
  */
 struct nfp_fl_qos {
 	u32 netdev_port_id;
+	struct nfp_stat_pair curr_stats;
+	struct nfp_stat_pair prev_stats;
+	u64 last_update;
 };
 
 /**
@@ -377,8 +390,11 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
 				       struct nfp_fl_pre_lag *pre_act);
 int nfp_flower_lag_get_output_id(struct nfp_app *app,
 				 struct net_device *master);
+void nfp_flower_qos_init(struct nfp_app *app);
+void nfp_flower_qos_cleanup(struct nfp_app *app);
 int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
 				 struct tc_cls_matchall_offload *flow);
+void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
 int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
 				       struct net_device *netdev,
 				       unsigned long event);
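To make the role of the paired curr_stats/prev_stats fields concrete, a small self-contained sketch of the same bookkeeping follows (plain userspace C; the qos_entry name and the helpers are illustrative, not driver code): the reply handler overwrites the current totals, and a TC stats query reports only the increase since the previous query.

/* Self-contained sketch of the curr/prev stat-pair scheme used by
 * struct nfp_fl_qos; names and values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

struct stat_pair { uint64_t pkts, bytes; };

struct qos_entry {
	struct stat_pair curr;	/* latest totals reported by hardware */
	struct stat_pair prev;	/* totals at the time of the last TC query */
};

/* Reply handler: store the latest hardware totals. */
static void update_from_hw(struct qos_entry *e, uint64_t pkts, uint64_t bytes)
{
	e->curr.pkts = pkts;
	e->curr.bytes = bytes;
}

/* TC stats query: report only the delta since the previous query. */
static void query_stats(struct qos_entry *e)
{
	uint64_t dp = e->curr.pkts - e->prev.pkts;
	uint64_t db = e->curr.bytes - e->prev.bytes;

	e->prev = e->curr;
	printf("delta: %llu pkts, %llu bytes\n",
	       (unsigned long long)dp, (unsigned long long)db);
}

int main(void)
{
	struct qos_entry e = { 0 };

	update_from_hw(&e, 10, 1500);
	query_stats(&e);		/* delta: 10 pkts, 1500 bytes */
	update_from_hw(&e, 25, 4000);
	query_stats(&e);		/* delta: 15 pkts, 2500 bytes */
	return 0;
}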


@@ -9,6 +9,8 @@
 #include "main.h"
 #include "../nfp_port.h"
 
+#define NFP_FL_QOS_UPDATE	msecs_to_jiffies(1000)
+
 struct nfp_police_cfg_head {
 	__be32 flags_opts;
 	__be32 port;
@@ -47,12 +49,21 @@ struct nfp_police_config {
 	__be32 cir;
 };
 
+struct nfp_police_stats_reply {
+	struct nfp_police_cfg_head head;
+	__be64 pass_bytes;
+	__be64 pass_pkts;
+	__be64 drop_bytes;
+	__be64 drop_pkts;
+};
+
 static int
 nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
 				struct tc_cls_matchall_offload *flow,
 				struct netlink_ext_ack *extack)
 {
 	struct flow_action_entry *action = &flow->rule->action.entries[0];
+	struct nfp_flower_priv *fl_priv = app->priv;
 	struct nfp_flower_repr_priv *repr_priv;
 	struct nfp_police_config *config;
 	struct nfp_repr *repr;
@@ -114,6 +125,10 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
 	repr_priv = repr->app_priv;
 	repr_priv->qos_table.netdev_port_id = netdev_port_id;
+	fl_priv->qos_rate_limiters++;
+	if (fl_priv->qos_rate_limiters == 1)
+		schedule_delayed_work(&fl_priv->qos_stats_work,
+				      NFP_FL_QOS_UPDATE);
 
 	return 0;
 }
@@ -123,6 +138,7 @@ nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
 			       struct tc_cls_matchall_offload *flow,
 			       struct netlink_ext_ack *extack)
 {
+	struct nfp_flower_priv *fl_priv = app->priv;
 	struct nfp_flower_repr_priv *repr_priv;
 	struct nfp_police_config *config;
 	struct nfp_repr *repr;
@@ -150,6 +166,10 @@ nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
 	/* Clear all qos associate data for this interface */
 	memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
 
+	fl_priv->qos_rate_limiters--;
+	if (!fl_priv->qos_rate_limiters)
+		cancel_delayed_work_sync(&fl_priv->qos_stats_work);
+
 	config = nfp_flower_cmsg_get_data(skb);
 	memset(config, 0, sizeof(struct nfp_police_config));
 	config->head.port = cpu_to_be32(netdev_port_id);
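The two hunks above gate the periodic stats work on the number of offloaded policers: polling is scheduled when the first rate limiter is installed and cancelled when the last one is removed. A stripped-down sketch of that gating, with the actual work scheduling stubbed out (all names here are illustrative):

/* Sketch of the "first install starts, last remove stops" gating used
 * for the periodic stats work; scheduling is replaced by printf.
 */
#include <stdio.h>

static unsigned int rate_limiters;

static void schedule_stats_work(void) { printf("stats polling started\n"); }
static void cancel_stats_work(void)   { printf("stats polling stopped\n"); }

static void install_rate_limiter(void)
{
	/* Start polling only when the first policer is offloaded. */
	if (++rate_limiters == 1)
		schedule_stats_work();
}

static void remove_rate_limiter(void)
{
	/* Stop polling once the last policer is gone. */
	if (--rate_limiters == 0)
		cancel_stats_work();
}

int main(void)
{
	install_rate_limiter();		/* starts polling */
	install_rate_limiter();
	remove_rate_limiter();
	remove_rate_limiter();		/* stops polling */
	return 0;
}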
@@ -158,6 +178,167 @@ nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
 	return 0;
 }
 
+void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
+{
+	struct nfp_flower_priv *fl_priv = app->priv;
+	struct nfp_flower_repr_priv *repr_priv;
+	struct nfp_police_stats_reply *msg;
+	struct nfp_stat_pair *curr_stats;
+	struct nfp_stat_pair *prev_stats;
+	struct net_device *netdev;
+	struct nfp_repr *repr;
+	u32 netdev_port_id;
+
+	msg = nfp_flower_cmsg_get_data(skb);
+	netdev_port_id = be32_to_cpu(msg->head.port);
+	rcu_read_lock();
+	netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
+	if (!netdev)
+		goto exit_unlock_rcu;
+
+	repr = netdev_priv(netdev);
+	repr_priv = repr->app_priv;
+	curr_stats = &repr_priv->qos_table.curr_stats;
+	prev_stats = &repr_priv->qos_table.prev_stats;
+
+	spin_lock_bh(&fl_priv->qos_stats_lock);
+	curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
+			   be64_to_cpu(msg->drop_pkts);
+	curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
+			    be64_to_cpu(msg->drop_bytes);
+
+	if (!repr_priv->qos_table.last_update) {
+		prev_stats->pkts = curr_stats->pkts;
+		prev_stats->bytes = curr_stats->bytes;
+	}
+
+	repr_priv->qos_table.last_update = jiffies;
+	spin_unlock_bh(&fl_priv->qos_stats_lock);
+
+exit_unlock_rcu:
+	rcu_read_unlock();
+}
+
+static void
+nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
+			      u32 netdev_port_id)
+{
+	struct nfp_police_cfg_head *head;
+	struct sk_buff *skb;
+
+	skb = nfp_flower_cmsg_alloc(fl_priv->app,
+				    sizeof(struct nfp_police_cfg_head),
+				    NFP_FLOWER_CMSG_TYPE_QOS_STATS,
+				    GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	head = nfp_flower_cmsg_get_data(skb);
+	memset(head, 0, sizeof(struct nfp_police_cfg_head));
+	head->port = cpu_to_be32(netdev_port_id);
+
+	nfp_ctrl_tx(fl_priv->app->ctrl, skb);
+}
+
+static void
+nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
+{
+	struct nfp_reprs *repr_set;
+	int i;
+
+	rcu_read_lock();
+	repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
+	if (!repr_set)
+		goto exit_unlock_rcu;
+
+	for (i = 0; i < repr_set->num_reprs; i++) {
+		struct net_device *netdev;
+
+		netdev = rcu_dereference(repr_set->reprs[i]);
+		if (netdev) {
+			struct nfp_repr *priv = netdev_priv(netdev);
+			struct nfp_flower_repr_priv *repr_priv;
+			u32 netdev_port_id;
+
+			repr_priv = priv->app_priv;
+			netdev_port_id = repr_priv->qos_table.netdev_port_id;
+			if (!netdev_port_id)
+				continue;
+
+			nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
+		}
+	}
+
+exit_unlock_rcu:
+	rcu_read_unlock();
+}
+
+static void update_stats_cache(struct work_struct *work)
+{
+	struct delayed_work *delayed_work;
+	struct nfp_flower_priv *fl_priv;
+
+	delayed_work = to_delayed_work(work);
+	fl_priv = container_of(delayed_work, struct nfp_flower_priv,
+			       qos_stats_work);
+
+	nfp_flower_stats_rlim_request_all(fl_priv);
+	schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
+}
+
+static int
+nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
+			      struct tc_cls_matchall_offload *flow,
+			      struct netlink_ext_ack *extack)
+{
+	struct nfp_flower_priv *fl_priv = app->priv;
+	struct nfp_flower_repr_priv *repr_priv;
+	struct nfp_stat_pair *curr_stats;
+	struct nfp_stat_pair *prev_stats;
+	u64 diff_bytes, diff_pkts;
+	struct nfp_repr *repr;
+
+	if (!nfp_netdev_is_nfp_repr(netdev)) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
+		return -EOPNOTSUPP;
+	}
+	repr = netdev_priv(netdev);
+
+	repr_priv = repr->app_priv;
+	if (!repr_priv->qos_table.netdev_port_id) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
+		return -EOPNOTSUPP;
+	}
+
+	spin_lock_bh(&fl_priv->qos_stats_lock);
+	curr_stats = &repr_priv->qos_table.curr_stats;
+	prev_stats = &repr_priv->qos_table.prev_stats;
+	diff_pkts = curr_stats->pkts - prev_stats->pkts;
+	diff_bytes = curr_stats->bytes - prev_stats->bytes;
+	prev_stats->pkts = curr_stats->pkts;
+	prev_stats->bytes = curr_stats->bytes;
+	spin_unlock_bh(&fl_priv->qos_stats_lock);
+
+	flow_stats_update(&flow->stats, diff_bytes, diff_pkts,
+			  repr_priv->qos_table.last_update);
+
+	return 0;
+}
+
+void nfp_flower_qos_init(struct nfp_app *app)
+{
+	struct nfp_flower_priv *fl_priv = app->priv;
+
+	spin_lock_init(&fl_priv->qos_stats_lock);
+	INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
+}
+
+void nfp_flower_qos_cleanup(struct nfp_app *app)
+{
+	struct nfp_flower_priv *fl_priv = app->priv;
+
+	cancel_delayed_work_sync(&fl_priv->qos_stats_work);
+}
+
 int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
 				 struct tc_cls_matchall_offload *flow)
 {
@@ -176,6 +357,9 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
 	case TC_CLSMATCHALL_DESTROY:
 		return nfp_flower_remove_rate_limiter(app, netdev, flow,
 						      extack);
+	case TC_CLSMATCHALL_STATS:
+		return nfp_flower_stats_rate_limiter(app, netdev, flow,
+						     extack);
 	default:
 		return -EOPNOTSUPP;
 	}
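As a side note on update_stats_cache() above: the work callback only receives a pointer to the embedded work item and recovers the surrounding nfp_flower_priv with container_of(). A simplified userspace illustration of that pattern (the macro and the structure names here are stand-ins, not the kernel definitions):

/* Self-contained illustration of the container_of pattern; the macro
 * below is a simplified userspace stand-in for the kernel's version.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct flower_priv {
	int rate_limiters;
	struct work stats_work;		/* embedded, like qos_stats_work */
};

/* The callback only receives a pointer to the embedded member ... */
static void work_fn(struct work *w)
{
	/* ... and recovers the enclosing structure from it. */
	struct flower_priv *priv =
		container_of(w, struct flower_priv, stats_work);

	printf("active rate limiters: %d\n", priv->rate_limiters);
}

int main(void)
{
	struct flower_priv priv = { .rate_limiters = 2 };

	work_fn(&priv.stats_work);
	return 0;
}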