veth: factor out initialization helper

Extract the code that enables and disables a range of xdp/napi
instances into simpler helpers, with the common property that the
"disable" helpers can't fail.

These helpers will be used by the next patch. No functional change
intended.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Paolo Abeni, 2021-07-20 10:41:49 +02:00 (committed by David S. Miller)
parent f7918b7901
commit dedd53c5e0
1 changed file with 92 additions and 49 deletions


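For orientation before the diff: the refactor splits each full-range enable/disable path into a helper that works on an arbitrary [start, end) queue range, keeps the original function as a thin wrapper over 0..real_num_rx_queues, and keeps every "disable"/unwind path infallible. The following is a minimal, self-contained userspace sketch of that shape; the struct and function names (fake_rq, enable_range, and so on) are invented for illustration and are not the veth driver code.

/*
 * Simplified illustration of the pattern the patch factors out:
 * "enable" over [start, end) that unwinds only what it already set up
 * on failure, a "disable" counterpart that cannot fail, and full-range
 * wrappers that preserve the original entry points.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_QUEUES 8

struct fake_rq {
	bool ring_ready;
};

static struct fake_rq queues[NUM_QUEUES];

/* Stand-in for a per-queue init step that may fail (always succeeds here). */
static int fake_ring_init(struct fake_rq *rq)
{
	rq->ring_ready = true;
	return 0;
}

/* Stand-in for the matching cleanup step; deliberately cannot fail. */
static void fake_ring_cleanup(struct fake_rq *rq)
{
	rq->ring_ready = false;
}

/* Enable a sub-range; on error, roll back only the entries in [start, i). */
static int enable_range(int start, int end)
{
	int err, i;

	for (i = start; i < end; i++) {
		err = fake_ring_init(&queues[i]);
		if (err)
			goto err_ring;
	}
	return 0;

err_ring:
	for (i--; i >= start; i--)
		fake_ring_cleanup(&queues[i]);
	return err;
}

/* Disable a sub-range; no failure path, so it doubles as the unwind step. */
static void disable_range(int start, int end)
{
	int i;

	for (i = start; i < end; i++)
		fake_ring_cleanup(&queues[i]);
}

/* The pre-existing full-range entry points become thin wrappers. */
static int enable_all(void)
{
	return enable_range(0, NUM_QUEUES);
}

static void disable_all(void)
{
	disable_range(0, NUM_QUEUES);
}

int main(void)
{
	if (enable_all())
		return EXIT_FAILURE;

	/* A follow-up change can now grow or shrink only part of the range. */
	disable_range(4, NUM_QUEUES);
	if (enable_range(4, NUM_QUEUES))
		return EXIT_FAILURE;

	disable_all();
	printf("range helpers exercised over %d queues\n", NUM_QUEUES);
	return EXIT_SUCCESS;
}

In the diff below, the same shape covers __veth_napi_enable_range() with veth_napi_del_range(), veth_enable_xdp_range() with veth_disable_xdp_range(), and veth_napi_enable_range(), while the pre-existing full-range functions are reduced to wrappers or callers of the new helpers.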
@@ -926,12 +926,12 @@ static int veth_poll(struct napi_struct *napi, int budget)
return done;
}
static int __veth_napi_enable(struct net_device *dev)
static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
{
struct veth_priv *priv = netdev_priv(dev);
int err, i;
for (i = 0; i < dev->real_num_rx_queues; i++) {
for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
@@ -939,7 +939,7 @@ static int __veth_napi_enable(struct net_device *dev)
goto err_xdp_ring;
}
for (i = 0; i < dev->real_num_rx_queues; i++) {
for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
napi_enable(&rq->xdp_napi);
@@ -947,19 +947,25 @@ static int __veth_napi_enable(struct net_device *dev)
}
return 0;
err_xdp_ring:
for (i--; i >= 0; i--)
for (i--; i >= start; i--)
ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
return err;
}
static void veth_napi_del(struct net_device *dev)
static int __veth_napi_enable(struct net_device *dev)
{
return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
}
static void veth_napi_del_range(struct net_device *dev, int start, int end)
{
struct veth_priv *priv = netdev_priv(dev);
int i;
for (i = 0; i < dev->real_num_rx_queues; i++) {
for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
rcu_assign_pointer(priv->rq[i].napi, NULL);
@@ -968,7 +974,7 @@ static void veth_napi_del(struct net_device *dev)
}
synchronize_net();
for (i = 0; i < dev->real_num_rx_queues; i++) {
for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
rq->rx_notify_masked = false;
@@ -976,19 +982,23 @@ static void veth_napi_del(struct net_device *dev)
}
}
static void veth_napi_del(struct net_device *dev)
{
veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
}
static bool veth_gro_requested(const struct net_device *dev)
{
return !!(dev->wanted_features & NETIF_F_GRO);
}
static int veth_enable_xdp(struct net_device *dev)
static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
bool napi_already_on)
{
bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
struct veth_priv *priv = netdev_priv(dev);
int err, i;
if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
for (i = 0; i < dev->real_num_rx_queues; i++) {
for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
if (!napi_already_on)
@@ -1006,11 +1016,56 @@ static int veth_enable_xdp(struct net_device *dev)
/* Save original mem info as it can be overwritten */
rq->xdp_mem = rq->xdp_rxq.mem;
}
return 0;
err_reg_mem:
xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
for (i--; i >= start; i--) {
struct veth_rq *rq = &priv->rq[i];
xdp_rxq_info_unreg(&rq->xdp_rxq);
if (!napi_already_on)
netif_napi_del(&rq->xdp_napi);
}
return err;
}
static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
bool delete_napi)
{
struct veth_priv *priv = netdev_priv(dev);
int i;
for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
rq->xdp_rxq.mem = rq->xdp_mem;
xdp_rxq_info_unreg(&rq->xdp_rxq);
if (delete_napi)
netif_napi_del(&rq->xdp_napi);
}
}
static int veth_enable_xdp(struct net_device *dev)
{
bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
struct veth_priv *priv = netdev_priv(dev);
int err, i;
if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
if (err)
return err;
if (!napi_already_on) {
err = __veth_napi_enable(dev);
if (err)
goto err_rxq_reg;
if (err) {
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
return err;
}
if (!veth_gro_requested(dev)) {
/* user-space did not require GRO, but adding XDP
@@ -1028,18 +1083,6 @@ static int veth_enable_xdp(struct net_device *dev)
}
return 0;
err_reg_mem:
xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
for (i--; i >= 0; i--) {
struct veth_rq *rq = &priv->rq[i];
xdp_rxq_info_unreg(&rq->xdp_rxq);
if (!napi_already_on)
netif_napi_del(&rq->xdp_napi);
}
return err;
}
static void veth_disable_xdp(struct net_device *dev)
@@ -1062,28 +1105,23 @@ static void veth_disable_xdp(struct net_device *dev)
}
}
for (i = 0; i < dev->real_num_rx_queues; i++) {
struct veth_rq *rq = &priv->rq[i];
rq->xdp_rxq.mem = rq->xdp_mem;
xdp_rxq_info_unreg(&rq->xdp_rxq);
}
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}
static int veth_napi_enable(struct net_device *dev)
static int veth_napi_enable_range(struct net_device *dev, int start, int end)
{
struct veth_priv *priv = netdev_priv(dev);
int err, i;
for (i = 0; i < dev->real_num_rx_queues; i++) {
for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
}
err = __veth_napi_enable(dev);
err = __veth_napi_enable_range(dev, start, end);
if (err) {
for (i = 0; i < dev->real_num_rx_queues; i++) {
for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
netif_napi_del(&rq->xdp_napi);
@@ -1093,6 +1131,11 @@ static int veth_napi_enable(struct net_device *dev)
return err;
}
static int veth_napi_enable(struct net_device *dev)
{
return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
}
static int veth_open(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);