Merge branch 'nfp-flower-handle-MTU-changes'
Jakub Kicinski says:

====================
nfp: flower: handle MTU changes

This set improves MTU handling for flower offload. The max MTU is
correctly capped, and the physical port MTU is communicated to the FW
(and indirectly to the HW).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 64e828dfc4
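Condensed from the diff below: an MTU request on an nfp netdev is first
vetted (check_mtu) against the firmware's advertised maximum minus the
headroom the flower FW reserves for tunnel encapsulation, and for physical
port representors the accepted value is then pushed to the FW, which must
ack it. The standalone C sketch below illustrates only the cap check; it is
not driver code — the helper name and the 9420-byte max MTU are made up for
the example, and only the 158-byte NFP_FLOWER_FRAME_HEADROOM value comes
from the patch.

#include <stdio.h>

#define NFP_FLOWER_FRAME_HEADROOM 158   /* reserved by the flower FW (from the patch) */
#define EXAMPLE_MAX_MTU 9420            /* hypothetical netdev->max_mtu */

/* Mirrors the check done by nfp_flower_check_mtu() in the patch: reject
 * anything above max_mtu minus the headroom reserved for tunnel headers.
 */
static int flower_check_mtu(int max_mtu, int new_mtu)
{
        if (new_mtu > max_mtu - NFP_FLOWER_FRAME_HEADROOM)
                return -1;      /* the driver returns -EINVAL here */
        return 0;
}

int main(void)
{
        int mtus[] = { 1500, 9000, 9262, 9263 };
        unsigned int i;

        for (i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++)
                printf("MTU %4d: %s\n", mtus[i],
                       flower_check_mtu(EXAMPLE_MAX_MTU, mtus[i]) ?
                       "rejected" : "accepted");
        return 0;
}

In practice this path is exercised by something like "ip link set <phys
port repr> mtu 9000"; the repr_change_mtu step in the diff then waits up to
10 ms for the FW to ack the portmod message before committing the new MTU.
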
@@ -221,7 +221,7 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
 }
 
 static int
-nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
 {
         struct nfp_net *nn = netdev_priv(netdev);
         unsigned int max_mtu;

@@ -413,7 +413,7 @@ const struct nfp_app_type app_bpf = {
         .init           = nfp_bpf_init,
         .clean          = nfp_bpf_clean,
 
-        .change_mtu     = nfp_bpf_change_mtu,
+        .check_mtu      = nfp_bpf_check_mtu,
         .extra_cap      = nfp_bpf_extra_cap,
 
@@ -104,7 +104,8 @@ nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
         msg->ports[idx].phys_port = phys_port;
 }
 
-int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
+int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok,
+                            unsigned int mtu, bool mtu_only)
 {
         struct nfp_flower_cmsg_portmod *msg;
         struct sk_buff *skb;

@@ -118,7 +119,11 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
         msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
         msg->reserved = 0;
         msg->info = carrier_ok;
-        msg->mtu = cpu_to_be16(repr->netdev->mtu);
+
+        if (mtu_only)
+                msg->info |= NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY;
+
+        msg->mtu = cpu_to_be16(mtu);
 
         nfp_ctrl_tx(repr->app->ctrl, skb);

@@ -146,6 +151,34 @@ int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
         return 0;
 }
 
+static bool
+nfp_flower_process_mtu_ack(struct nfp_app *app, struct sk_buff *skb)
+{
+        struct nfp_flower_priv *app_priv = app->priv;
+        struct nfp_flower_cmsg_portmod *msg;
+
+        msg = nfp_flower_cmsg_get_data(skb);
+
+        if (!(msg->info & NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY))
+                return false;
+
+        spin_lock_bh(&app_priv->mtu_conf.lock);
+        if (!app_priv->mtu_conf.requested_val ||
+            app_priv->mtu_conf.portnum != be32_to_cpu(msg->portnum) ||
+            be16_to_cpu(msg->mtu) != app_priv->mtu_conf.requested_val) {
+                /* Not an ack for requested MTU change. */
+                spin_unlock_bh(&app_priv->mtu_conf.lock);
+                return false;
+        }
+
+        app_priv->mtu_conf.ack = true;
+        app_priv->mtu_conf.requested_val = 0;
+        wake_up(&app_priv->mtu_conf.wait_q);
+        spin_unlock_bh(&app_priv->mtu_conf.lock);
+
+        return true;
+}
+
 static void
 nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
 {

@@ -269,6 +302,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
                 /* We need to deal with stats updates from HW asap */
                 nfp_flower_rx_flow_stats(app, skb);
                 dev_consume_skb_any(skb);
+        } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_MOD &&
+                   nfp_flower_process_mtu_ack(app, skb)) {
+                /* Handle MTU acks outside wq to prevent RTNL conflict. */
+                dev_consume_skb_any(skb);
         } else {
                 skb_queue_tail(&priv->cmsg_skbs, skb);
                 schedule_work(&priv->cmsg_work);
@@ -397,6 +397,7 @@ struct nfp_flower_cmsg_portmod {
 };
 
 #define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK               BIT(0)
+#define NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY         BIT(1)
 
 /* NFP_FLOWER_CMSG_TYPE_PORT_REIFY */
 struct nfp_flower_cmsg_portreify {

@@ -464,7 +465,8 @@ void
 nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
                              unsigned int nbi, unsigned int nbi_port,
                              unsigned int phys_port);
-int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok);
+int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok,
+                            unsigned int mtu, bool mtu_only);
 int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists);
 void nfp_flower_cmsg_process_rx(struct work_struct *work);
 void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
@@ -52,6 +52,8 @@
 
 #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
 
+#define NFP_FLOWER_FRAME_HEADROOM               158
+
 static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
 {
         return "FLOWER";

@@ -157,7 +159,7 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
 {
         int err;
 
-        err = nfp_flower_cmsg_portmod(repr, true);
+        err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
         if (err)
                 return err;
 

@@ -171,7 +173,7 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
 {
         netif_tx_disable(repr->netdev);
 
-        return nfp_flower_cmsg_portmod(repr, false);
+        return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
 }
 
 static int

@@ -521,6 +523,9 @@ static int nfp_flower_init(struct nfp_app *app)
         INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
         init_waitqueue_head(&app_priv->reify_wait_queue);
 
+        init_waitqueue_head(&app_priv->mtu_conf.wait_q);
+        spin_lock_init(&app_priv->mtu_conf.lock);
+
         err = nfp_flower_metadata_init(app);
         if (err)
                 goto err_free_app_priv;

@@ -552,6 +557,81 @@ static void nfp_flower_clean(struct nfp_app *app)
         app->priv = NULL;
 }
 
+static int
+nfp_flower_check_mtu(struct nfp_app *app, struct net_device *netdev,
+                     int new_mtu)
+{
+        /* The flower fw reserves NFP_FLOWER_FRAME_HEADROOM bytes of the
+         * supported max MTU to allow for appending tunnel headers. To prevent
+         * unexpected behaviour this needs to be accounted for.
+         */
+        if (new_mtu > netdev->max_mtu - NFP_FLOWER_FRAME_HEADROOM) {
+                nfp_err(app->cpp, "New MTU (%d) is not valid\n", new_mtu);
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
+{
+        bool ret;
+
+        spin_lock_bh(&app_priv->mtu_conf.lock);
+        ret = app_priv->mtu_conf.ack;
+        spin_unlock_bh(&app_priv->mtu_conf.lock);
+
+        return ret;
+}
+
+static int
+nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
+                           int new_mtu)
+{
+        struct nfp_flower_priv *app_priv = app->priv;
+        struct nfp_repr *repr = netdev_priv(netdev);
+        int err, ack;
+
+        /* Only need to config FW for physical port MTU change. */
+        if (repr->port->type != NFP_PORT_PHYS_PORT)
+                return 0;
+
+        if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
+                nfp_err(app->cpp, "Physical port MTU setting not supported\n");
+                return -EINVAL;
+        }
+
+        spin_lock_bh(&app_priv->mtu_conf.lock);
+        app_priv->mtu_conf.ack = false;
+        app_priv->mtu_conf.requested_val = new_mtu;
+        app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
+        spin_unlock_bh(&app_priv->mtu_conf.lock);
+
+        err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
+                                      true);
+        if (err) {
+                spin_lock_bh(&app_priv->mtu_conf.lock);
+                app_priv->mtu_conf.requested_val = 0;
+                spin_unlock_bh(&app_priv->mtu_conf.lock);
+                return err;
+        }
+
+        /* Wait for fw to ack the change. */
+        ack = wait_event_timeout(app_priv->mtu_conf.wait_q,
+                                 nfp_flower_check_ack(app_priv),
+                                 msecs_to_jiffies(10));
+
+        if (!ack) {
+                spin_lock_bh(&app_priv->mtu_conf.lock);
+                app_priv->mtu_conf.requested_val = 0;
+                spin_unlock_bh(&app_priv->mtu_conf.lock);
+                nfp_warn(app->cpp, "MTU change not verified with fw\n");
+                return -EIO;
+        }
+
+        return 0;
+}
+
 static int nfp_flower_start(struct nfp_app *app)
 {
         return nfp_tunnel_config_start(app);

@@ -574,6 +654,9 @@ const struct nfp_app_type app_flower = {
         .init           = nfp_flower_init,
         .clean          = nfp_flower_clean,
 
+        .check_mtu      = nfp_flower_check_mtu,
+        .repr_change_mtu = nfp_flower_repr_change_mtu,
+
         .vnic_alloc     = nfp_flower_vnic_alloc,
         .vnic_init      = nfp_flower_vnic_init,
         .vnic_clean     = nfp_flower_vnic_clean,
@@ -65,6 +65,7 @@ struct nfp_app;
 
 /* Extra features bitmap. */
 #define NFP_FL_FEATS_GENEVE             BIT(0)
+#define NFP_FL_NBI_MTU_SETTING          BIT(1)
 
 struct nfp_fl_mask_id {
         struct circ_buf mask_id_free_list;

@@ -78,6 +79,22 @@ struct nfp_fl_stats_id {
         u8 repeated_em_count;
 };
 
+/**
+ * struct nfp_mtu_conf - manage MTU setting
+ * @portnum:            NFP port number of repr with requested MTU change
+ * @requested_val:      MTU value requested for repr
+ * @ack:                Received ack that MTU has been correctly set
+ * @wait_q:             Wait queue for MTU acknowledgements
+ * @lock:               Lock for setting/reading MTU variables
+ */
+struct nfp_mtu_conf {
+        u32 portnum;
+        unsigned int requested_val;
+        bool ack;
+        wait_queue_head_t wait_q;
+        spinlock_t lock;
+};
+
 /**
  * struct nfp_flower_priv - Flower APP per-vNIC priv data
  * @app:                Back pointer to app

@@ -106,6 +123,7 @@ struct nfp_fl_stats_id {
  * @reify_replies:      atomically stores the number of replies received
  *                      from firmware for repr reify
  * @reify_wait_queue:   wait queue for repr reify response counting
+ * @mtu_conf:           Configuration of repr MTU value
  */
 struct nfp_flower_priv {
         struct nfp_app *app;

@@ -133,6 +151,7 @@ struct nfp_flower_priv {
         struct notifier_block nfp_tun_neigh_nb;
         atomic_t reify_replies;
         wait_queue_head_t reify_wait_queue;
+        struct nfp_mtu_conf mtu_conf;
 };
 
 struct nfp_fl_key_ls {
@@ -86,8 +86,8 @@ extern const struct nfp_app_type app_flower;
  * @repr_clean: representor about to be unregistered
  * @repr_open:  representor netdev open callback
  * @repr_stop:  representor netdev stop callback
- * @change_mtu: MTU change on a netdev has been requested (veto-only, change
- *              is not guaranteed to be committed)
+ * @check_mtu:  MTU change request on a netdev (verify it is valid)
+ * @repr_change_mtu:    MTU change request on repr (make and verify change)
  * @start:      start application logic
  * @stop:       stop application logic
  * @ctrl_msg_rx:    control message handler

@@ -124,8 +124,10 @@ struct nfp_app_type {
         int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
         int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
 
-        int (*change_mtu)(struct nfp_app *app, struct net_device *netdev,
-                          int new_mtu);
+        int (*check_mtu)(struct nfp_app *app, struct net_device *netdev,
+                         int new_mtu);
+        int (*repr_change_mtu)(struct nfp_app *app, struct net_device *netdev,
+                               int new_mtu);
 
         int (*start)(struct nfp_app *app);
         void (*stop)(struct nfp_app *app);

@@ -247,11 +249,20 @@ nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
 }
 
 static inline int
-nfp_app_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+nfp_app_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
 {
-        if (!app || !app->type->change_mtu)
+        if (!app || !app->type->check_mtu)
                 return 0;
-        return app->type->change_mtu(app, netdev, new_mtu);
+        return app->type->check_mtu(app, netdev, new_mtu);
 }
 
+static inline int
+nfp_app_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
+                        int new_mtu)
+{
+        if (!app || !app->type->repr_change_mtu)
+                return 0;
+        return app->type->repr_change_mtu(app, netdev, new_mtu);
+}
+
 static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
@@ -3066,7 +3066,7 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
         struct nfp_net_dp *dp;
         int err;
 
-        err = nfp_app_change_mtu(nn->app, netdev, new_mtu);
+        err = nfp_app_check_mtu(nn->app, netdev, new_mtu);
         if (err)
                 return err;
 
@@ -196,8 +196,19 @@ nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
 static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
 {
         struct nfp_repr *repr = netdev_priv(netdev);
+        int err;
 
-        return nfp_app_change_mtu(repr->app, netdev, new_mtu);
+        err = nfp_app_check_mtu(repr->app, netdev, new_mtu);
+        if (err)
+                return err;
+
+        err = nfp_app_repr_change_mtu(repr->app, netdev, new_mtu);
+        if (err)
+                return err;
+
+        netdev->mtu = new_mtu;
+
+        return 0;
 }
 
 static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)