hv_netvsc: Fix unwanted wakeup after tx_disable
After a queue is stopped, the wakeup mechanism may wake it up again
when ring buffer usage drops below a threshold. This can cause a
NULL-pointer panic in the send path when we have stopped all tx queues
in netvsc_detach and start removing the netvsc device.

This patch fixes it by adding a tx_disable flag to prevent unwanted
queue wakeups.
Fixes: 7b2ee50c0c ("hv_netvsc: common detach logic")
Reported-by: Mohammed Gamal <mgamal@redhat.com>
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 1b704c4a1b
parent 18bebc6dd3
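For context on why a flag plus a write barrier is enough, here is a minimal userspace sketch of the pattern the patch introduces; it is not driver code. Teardown sets a disable flag and issues a barrier before stopping the queue, and the completion path re-checks the flag before waking a stopped queue. The names fake_queue, completion_path and detach_path are invented for illustration, and atomic_thread_fence() merely stands in for the kernel's virt_wmb().

```c
/*
 * Userspace model of the tx_disable pattern (illustrative only).
 * Build with: cc -std=c11 -pthread model.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	atomic_bool stopped;		/* models netif_tx_queue_stopped() */
	atomic_bool tx_disable;		/* if true, do not wake up queue again */
};

/* Completion side: only wake the queue if teardown has not started. */
static void completion_path(struct fake_queue *q)
{
	if (atomic_load(&q->stopped) && !atomic_load(&q->tx_disable)) {
		atomic_store(&q->stopped, false);
		puts("queue woken");
	}
}

/* Teardown side: mark tx disabled, then stop the queue for good. */
static void detach_path(struct fake_queue *q)
{
	atomic_store(&q->tx_disable, true);
	/* plays the role of virt_wmb(): flag is visible before the stop */
	atomic_thread_fence(memory_order_release);
	atomic_store(&q->stopped, true);
}

int main(void)
{
	struct fake_queue q;

	atomic_init(&q.stopped, true);		/* queue already stopped by send path */
	atomic_init(&q.tx_disable, false);

	detach_path(&q);	/* netvsc_detach()-like teardown */
	completion_path(&q);	/* late tx completion: must NOT wake the queue */

	printf("queue stopped after completion: %s\n",
	       atomic_load(&q.stopped) ? "yes (correct)" : "no (bug)");
	return 0;
}
```

Without the flag check in completion_path(), the late completion would wake the queue that detach_path() just stopped, which is exactly the window the patch closes.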
@@ -987,6 +987,7 @@ struct netvsc_device {
 
         wait_queue_head_t wait_drain;
         bool destroy;
+        bool tx_disable; /* if true, do not wake up queue again */
 
         /* Receive buffer allocated by us but manages by NetVSP */
         void *recv_buf;
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
 
         init_waitqueue_head(&net_device->wait_drain);
         net_device->destroy = false;
+        net_device->tx_disable = false;
 
         net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
         net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
         } else {
                 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
-                if (netif_tx_queue_stopped(txq) &&
+                if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
                     (hv_get_avail_to_write_percent(&channel->outbound) >
                      RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
                         netif_tx_wake_queue(txq);
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
         } else if (ret == -EAGAIN) {
                 netif_tx_stop_queue(txq);
                 ndev_ctx->eth_stats.stop_queue++;
-                if (atomic_read(&nvchan->queue_sends) < 1) {
+                if (atomic_read(&nvchan->queue_sends) < 1 &&
+                    !net_device->tx_disable) {
                         netif_tx_wake_queue(txq);
                         ndev_ctx->eth_stats.wake_queue++;
                         ret = -ENOSPC;
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
         rcu_read_unlock();
 }
 
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+                             struct net_device *ndev)
+{
+        nvscdev->tx_disable = false;
+        virt_wmb(); /* ensure queue wake up mechanism is on */
+
+        netif_tx_wake_all_queues(ndev);
+}
+
 static int netvsc_open(struct net_device *net)
 {
         struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
         rdev = nvdev->extension;
         if (!rdev->link_state) {
                 netif_carrier_on(net);
-                netif_tx_wake_all_queues(net);
+                netvsc_tx_enable(nvdev, net);
         }
 
         if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
         }
 }
 
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+                              struct net_device *ndev)
+{
+        if (nvscdev) {
+                nvscdev->tx_disable = true;
+                virt_wmb(); /* ensure txq will not wake up after stop */
+        }
+
+        netif_tx_disable(ndev);
+}
+
 static int netvsc_close(struct net_device *net)
 {
         struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
         struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
         int ret;
 
-        netif_tx_disable(net);
+        netvsc_tx_disable(nvdev, net);
 
         /* No need to close rndis filter if it is removed already */
         if (!nvdev)
@@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev,
 
         /* If device was up (receiving) then shutdown */
         if (netif_running(ndev)) {
-                netif_tx_disable(ndev);
+                netvsc_tx_disable(nvdev, ndev);
 
                 ret = rndis_filter_close(nvdev);
                 if (ret) {
@@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w)
                 if (rdev->link_state) {
                         rdev->link_state = false;
                         netif_carrier_on(net);
-                        netif_tx_wake_all_queues(net);
+                        netvsc_tx_enable(net_device, net);
                 } else {
                         notify = true;
                 }
@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
                 if (!rdev->link_state) {
                         rdev->link_state = true;
                         netif_carrier_off(net);
-                        netif_tx_stop_all_queues(net);
+                        netvsc_tx_disable(net_device, net);
                 }
                 kfree(event);
                 break;
@@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w)
                 if (!rdev->link_state) {
                         rdev->link_state = true;
                         netif_carrier_off(net);
-                        netif_tx_stop_all_queues(net);
+                        netvsc_tx_disable(net_device, net);
                         event->event = RNDIS_STATUS_MEDIA_CONNECT;
                         spin_lock_irqsave(&ndev_ctx->lock, flags);
                         list_add(&event->list, &ndev_ctx->reconfig_events);