xen-netfront: recreate queues correctly when reconnecting
When reconnecting to the backend (after a resume/migration, for
example), a different number of queues may be required (since the guest
may have moved to a different host with different capabilities). During
the reconnection the old queues are torn down and new ones created.

Introduce xennet_create_queues() and xennet_destroy_queues(), which fix
three bugs during the reconnection:

- The old info->queues was leaked.

- The old queues' napi instances were not deleted.

- The new queues' napi instances were left disabled (which meant no
  packets could be received).

The xennet_destroy_queues() call is deferred until the reconnection
instead of the disconnection (in xennet_disconnect_backend()) because
napi_disable() might sleep.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 765418694b
commit ce58725fec
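As the commit message notes, napi_disable() may sleep, which is why the old
queues cannot be torn down in xennet_disconnect_backend() itself. For
context, here is a paraphrased sketch of napi_disable() as it looked around
this kernel release (the real definition was a static inline in
include/linux/netdevice.h; details may differ slightly). It is shown only to
illustrate why its caller must be allowed to sleep; it is not part of this
patch:

	/* Illustrative paraphrase, not part of this patch: napi_disable()
	 * loops with msleep(1) until any in-flight poll finishes, so it
	 * must not be called from atomic context.
	 */
	static inline void napi_disable(struct napi_struct *n)
	{
		might_sleep();
		set_bit(NAPI_STATE_DISABLE, &n->state);
		while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
		clear_bit(NAPI_STATE_DISABLE, &n->state);
	}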
drivers/net/xen-netfront.c

@@ -1699,8 +1699,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
 		goto exit_free_tx;
 	}
 
-	netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
-
 	return 0;
 
 exit_free_tx:
@@ -1791,6 +1789,70 @@ error:
 	return err;
 }
 
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+	unsigned int i;
+
+	rtnl_lock();
+
+	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+		struct netfront_queue *queue = &info->queues[i];
+
+		if (netif_running(info->netdev))
+			napi_disable(&queue->napi);
+		netif_napi_del(&queue->napi);
+	}
+
+	rtnl_unlock();
+
+	kfree(info->queues);
+	info->queues = NULL;
+}
+
+static int xennet_create_queues(struct netfront_info *info,
+				unsigned int num_queues)
+{
+	unsigned int i;
+	int ret;
+
+	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+			       GFP_KERNEL);
+	if (!info->queues)
+		return -ENOMEM;
+
+	rtnl_lock();
+
+	for (i = 0; i < num_queues; i++) {
+		struct netfront_queue *queue = &info->queues[i];
+
+		queue->id = i;
+		queue->info = info;
+
+		ret = xennet_init_queue(queue);
+		if (ret < 0) {
+			dev_warn(&info->netdev->dev, "only created %d queues\n",
+				 num_queues);
+			num_queues = i;
+			break;
+		}
+
+		netif_napi_add(queue->info->netdev, &queue->napi,
+			       xennet_poll, 64);
+		if (netif_running(info->netdev))
+			napi_enable(&queue->napi);
+	}
+
+	netif_set_real_num_tx_queues(info->netdev, num_queues);
+
+	rtnl_unlock();
+
+	if (num_queues == 0) {
+		dev_err(&info->netdev->dev, "no queues\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_netback(struct xenbus_device *dev,
 			   struct netfront_info *info)
@@ -1827,42 +1889,20 @@ static int talk_to_netback(struct xenbus_device *dev,
 		goto out;
 	}
 
-	/* Allocate array of queues */
-	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL);
-	if (!info->queues) {
-		err = -ENOMEM;
-		goto out;
-	}
-	rtnl_lock();
-	netif_set_real_num_tx_queues(info->netdev, num_queues);
-	rtnl_unlock();
+	if (info->queues)
+		xennet_destroy_queues(info);
+
+	err = xennet_create_queues(info, num_queues);
+	if (err < 0)
+		goto destroy_ring;
 
 	/* Create shared ring, alloc event channel -- for each queue */
 	for (i = 0; i < num_queues; ++i) {
 		queue = &info->queues[i];
-		queue->id = i;
-		queue->info = info;
-		err = xennet_init_queue(queue);
-		if (err) {
-			/* xennet_init_queue() cleans up after itself on failure,
-			 * but we still have to clean up any previously initialised
-			 * queues. If i > 0, set num_queues to i, then goto
-			 * destroy_ring, which calls xennet_disconnect_backend()
-			 * to tidy up.
-			 */
-			if (i > 0) {
-				rtnl_lock();
-				netif_set_real_num_tx_queues(info->netdev, i);
-				rtnl_unlock();
-				goto destroy_ring;
-			} else {
-				goto out;
-			}
-		}
 		err = setup_netfront(dev, queue, feature_split_evtchn);
 		if (err) {
-			/* As for xennet_init_queue(), setup_netfront() will tidy
-			 * up the current queue on error, but we need to clean up
-			 * those already allocated.
+			/* setup_netfront() will tidy up the current
+			 * queue on error, but we need to clean up
+			 * those already allocated.
 			 */
 			if (i > 0) {
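For readability, here is how the queue set-up portion of talk_to_netback()
reads once the hunk above is applied. This is assembled purely from the
context and '+' lines of the diff, not a further change; the tail of the
error path is elided where the diff ends:

	if (info->queues)
		xennet_destroy_queues(info);

	err = xennet_create_queues(info, num_queues);
	if (err < 0)
		goto destroy_ring;

	/* Create shared ring, alloc event channel -- for each queue */
	for (i = 0; i < num_queues; ++i) {
		queue = &info->queues[i];
		err = setup_netfront(dev, queue, feature_split_evtchn);
		if (err) {
			/* setup_netfront() will tidy up the current
			 * queue on error, but we need to clean up
			 * those already allocated.
			 */
			if (i > 0) {
				...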