virtio: Protect vqs list access
A VQ may be accessed to mark the device broken while VQs are being
created or destroyed. Hence, protect access to the vqs list with a spinlock.
Fixes: e2dcdfe95c ("virtio: virtio_break_device() to mark all virtqueues broken.")
Signed-off-by: Parav Pandit <parav@nvidia.com>
Link: https://lore.kernel.org/r/20210721142648.1525924-4-parav@nvidia.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
commit 0e566c8f0f
parent 249f255476
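The race being closed: virtio_break_device() walks dev->vqs to mark every
queue broken while vring_create_virtqueue_packed() / __vring_new_virtqueue()
may be adding entries and vring_del_virtqueue() may be removing them, so an
unsynchronized walk can step onto a just-freed node. Below is a minimal
userspace sketch of that pattern and of the fix, with a pthread mutex
standing in for the kernel spinlock; everything in it (struct vq, vq_add(),
break_all(), ...) is illustrative, not kernel API. Remove the lock/unlock
pairs and a ThreadSanitizer build (cc -fsanitize=thread) typically flags the
walk as a data race.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct vq {
	struct vq *next, *prev;
	bool broken;
};

/* Empty circular list, like INIT_LIST_HEAD(&dev->vqs). */
static struct vq head = { &head, &head, false };
static pthread_mutex_t vqs_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the patched list_add_tail() call sites. */
static void vq_add(struct vq *vq)
{
	pthread_mutex_lock(&vqs_list_lock);
	vq->next = &head;
	vq->prev = head.prev;
	head.prev->next = vq;
	head.prev = vq;
	pthread_mutex_unlock(&vqs_list_lock);
}

/* Mirrors the patched vring_del_virtqueue(). */
static void vq_del(struct vq *vq)
{
	pthread_mutex_lock(&vqs_list_lock);
	vq->prev->next = vq->next;
	vq->next->prev = vq->prev;
	pthread_mutex_unlock(&vqs_list_lock);
}

/* Mirrors the patched virtio_break_device(): without the lock, this
 * walk can dereference a node that churn() has already freed. */
static void break_all(void)
{
	pthread_mutex_lock(&vqs_list_lock);
	for (struct vq *vq = head.next; vq != &head; vq = vq->next)
		vq->broken = true;
	pthread_mutex_unlock(&vqs_list_lock);
}

static void *churn(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		struct vq *vq = calloc(1, sizeof(*vq));
		vq_add(vq);
		vq_del(vq);
		free(vq);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, churn, NULL);
	for (int i = 0; i < 100000; i++)
		break_all();
	pthread_join(t, NULL);
	puts("no crash: list walk and add/del were serialized");
	return 0;
}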
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -355,6 +355,7 @@ int register_virtio_device(struct virtio_device *dev)
 	virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 
 	INIT_LIST_HEAD(&dev->vqs);
+	spin_lock_init(&dev->vqs_list_lock);
 
 	/*
 	 * device_add() causes the bus infrastructure to look for a matching
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1755,7 +1755,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
 			cpu_to_le16(vq->packed.event_flags_shadow);
 	}
 
+	spin_lock(&vdev->vqs_list_lock);
 	list_add_tail(&vq->vq.list, &vdev->vqs);
+	spin_unlock(&vdev->vqs_list_lock);
 	return &vq->vq;
 
 err_desc_extra:
@@ -2229,7 +2231,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
 	memset(vq->split.desc_state, 0, vring.num *
 			sizeof(struct vring_desc_state_split));
 
+	spin_lock(&vdev->vqs_list_lock);
 	list_add_tail(&vq->vq.list, &vdev->vqs);
+	spin_unlock(&vdev->vqs_list_lock);
 	return &vq->vq;
 
 err_extra:
@@ -2291,7 +2295,9 @@ void vring_del_virtqueue(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
+	spin_lock(&vq->vq.vdev->vqs_list_lock);
 	list_del(&_vq->list);
+	spin_unlock(&vq->vq.vdev->vqs_list_lock);
 
 	if (vq->we_own_ring) {
 		if (vq->packed_ring) {
@@ -2386,12 +2392,14 @@ void virtio_break_device(struct virtio_device *dev)
 {
 	struct virtqueue *_vq;
 
+	spin_lock(&dev->vqs_list_lock);
 	list_for_each_entry(_vq, &dev->vqs, list) {
 		struct vring_virtqueue *vq = to_vvq(_vq);
 
 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
 		WRITE_ONCE(vq->broken, true);
 	}
+	spin_unlock(&dev->vqs_list_lock);
 }
 EXPORT_SYMBOL_GPL(virtio_break_device);
 
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -110,6 +110,7 @@ struct virtio_device {
 	bool config_enabled;
 	bool config_change_pending;
 	spinlock_t config_lock;
+	spinlock_t vqs_list_lock; /* Protects VQs list access */
 	struct device dev;
 	struct virtio_device_id id;
 	const struct virtio_config_ops *config;
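Note the scope of the new lock: it serializes only list membership. The
broken flag itself is still written and read locklessly, which is why the
virtio_break_device() hunk keeps WRITE_ONCE() paired with the READ_ONCE()
in virtqueue_is_broken(). A rough userspace analogue of that flag protocol,
using C11 relaxed atomics in place of the kernel's ONCE macros (struct
vq_state, mark_broken(), is_broken() are illustrative names, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

struct vq_state {
	atomic_bool broken;	/* stands in for vring_virtqueue::broken */
};

/* Writer: analogue of WRITE_ONCE(vq->broken, true) above. The store
 * itself needs no lock; the list lock only keeps the walk safe. */
static void mark_broken(struct vq_state *vq)
{
	atomic_store_explicit(&vq->broken, true, memory_order_relaxed);
}

/* Reader: analogue of READ_ONCE() in virtqueue_is_broken(); callers
 * poll a single flag, so no list lock is taken on this path. */
static bool is_broken(struct vq_state *vq)
{
	return atomic_load_explicit(&vq->broken, memory_order_relaxed);
}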