vdpa_sim: replace the spinlock with a mutex to protect the state
The spinlock we use to protect the state of the simulator is sometimes
held for a long time (for example, when devices handle requests).

This also prevents us from calling functions that might sleep (such as
kthread_flush_work() in the next patch), and thus having to release
and retake the lock.

For these reasons, let's replace the spinlock with a mutex that gives
us more flexibility.

Suggested-by: Jason Wang <jasowang@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-Id: <20230404131730.45920-1-sgarzare@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
commit d7621c28fc
parent 76acfa7bc5
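For context, a minimal sketch of the constraint behind this change. This is illustrative only, not code from the patch; kthread_flush_work() stands in for any function that might sleep:

/* Invalid with a spinlock: the holder runs in atomic context, and
 * calling a function that may sleep while a spinlock is held is a bug.
 */
spin_lock(&vdpasim->lock);
kthread_flush_work(&vdpasim->work);     /* may sleep: not allowed here */
spin_unlock(&vdpasim->lock);

/* Valid with a mutex: the holder of a mutex is allowed to sleep, so
 * blocking calls can stay inside the critical section instead of
 * forcing a release/retake of the lock.
 */
mutex_lock(&vdpasim->mutex);
kthread_flush_work(&vdpasim->work);     /* sleeping is fine here */
mutex_unlock(&vdpasim->mutex);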
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -178,7 +178,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
 	if (IS_ERR(vdpasim->worker))
 		goto err_iommu;
 
-	spin_lock_init(&vdpasim->lock);
+	mutex_init(&vdpasim->mutex);
 	spin_lock_init(&vdpasim->iommu_lock);
 
 	dev->dma_mask = &dev->coherent_dma_mask;
@@ -286,13 +286,13 @@ static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
 	bool old_ready;
 
-	spin_lock(&vdpasim->lock);
+	mutex_lock(&vdpasim->mutex);
 	old_ready = vq->ready;
 	vq->ready = ready;
 	if (vq->ready && !old_ready) {
 		vdpasim_queue_ready(vdpasim, idx);
 	}
-	spin_unlock(&vdpasim->lock);
+	mutex_unlock(&vdpasim->mutex);
 }
 
 static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
@@ -310,9 +310,9 @@ static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
 	struct vringh *vrh = &vq->vring;
 
-	spin_lock(&vdpasim->lock);
+	mutex_lock(&vdpasim->mutex);
 	vrh->last_avail_idx = state->split.avail_index;
-	spin_unlock(&vdpasim->lock);
+	mutex_unlock(&vdpasim->mutex);
 
 	return 0;
 }
@@ -409,9 +409,9 @@ static u8 vdpasim_get_status(struct vdpa_device *vdpa)
 	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 	u8 status;
 
-	spin_lock(&vdpasim->lock);
+	mutex_lock(&vdpasim->mutex);
 	status = vdpasim->status;
-	spin_unlock(&vdpasim->lock);
+	mutex_unlock(&vdpasim->mutex);
 
 	return status;
 }
@@ -420,19 +420,19 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
 {
 	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 
-	spin_lock(&vdpasim->lock);
+	mutex_lock(&vdpasim->mutex);
 	vdpasim->status = status;
-	spin_unlock(&vdpasim->lock);
+	mutex_unlock(&vdpasim->mutex);
 }
 
 static int vdpasim_reset(struct vdpa_device *vdpa)
 {
 	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 
-	spin_lock(&vdpasim->lock);
+	mutex_lock(&vdpasim->mutex);
 	vdpasim->status = 0;
 	vdpasim_do_reset(vdpasim);
-	spin_unlock(&vdpasim->lock);
+	mutex_unlock(&vdpasim->mutex);
 
 	return 0;
 }
@@ -441,9 +441,9 @@ static int vdpasim_suspend(struct vdpa_device *vdpa)
 {
 	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 
-	spin_lock(&vdpasim->lock);
+	mutex_lock(&vdpasim->mutex);
 	vdpasim->running = false;
-	spin_unlock(&vdpasim->lock);
+	mutex_unlock(&vdpasim->mutex);
 
 	return 0;
 }
@@ -453,7 +453,7 @@ static int vdpasim_resume(struct vdpa_device *vdpa)
 	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 	int i;
 
-	spin_lock(&vdpasim->lock);
+	mutex_lock(&vdpasim->mutex);
 	vdpasim->running = true;
 
 	if (vdpasim->pending_kick) {
@@ -464,7 +464,7 @@ static int vdpasim_resume(struct vdpa_device *vdpa)
 		vdpasim->pending_kick = false;
 	}
 
-	spin_unlock(&vdpasim->lock);
+	mutex_unlock(&vdpasim->mutex);
 
 	return 0;
 }
@@ -536,14 +536,14 @@ static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
 
 	iommu = &vdpasim->iommu[asid];
 
-	spin_lock(&vdpasim->lock);
+	mutex_lock(&vdpasim->mutex);
 
 	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
 		if (vdpasim_get_vq_group(vdpa, i) == group)
 			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);
 
-	spin_unlock(&vdpasim->lock);
+	mutex_unlock(&vdpasim->mutex);
 
 	return 0;
 }
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -60,8 +60,8 @@ struct vdpasim {
 	struct kthread_worker *worker;
 	struct kthread_work work;
 	struct vdpasim_dev_attr dev_attr;
-	/* spinlock to synchronize virtqueue state */
-	spinlock_t lock;
+	/* mutex to synchronize virtqueue state */
+	struct mutex mutex;
 	/* virtio config according to device type */
 	void *config;
 	struct vhost_iotlb *iommu;
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -290,7 +290,7 @@ static void vdpasim_blk_work(struct vdpasim *vdpasim)
 	bool reschedule = false;
 	int i;
 
-	spin_lock(&vdpasim->lock);
+	mutex_lock(&vdpasim->mutex);
 
 	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
 		goto out;
@@ -321,7 +321,7 @@ static void vdpasim_blk_work(struct vdpasim *vdpasim)
 		}
 	}
 out:
-	spin_unlock(&vdpasim->lock);
+	mutex_unlock(&vdpasim->mutex);
 
 	if (reschedule)
 		vdpasim_schedule_work(vdpasim);
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -201,7 +201,7 @@ static void vdpasim_net_work(struct vdpasim *vdpasim)
 	u64 rx_drops = 0, rx_overruns = 0, rx_errors = 0, tx_errors = 0;
 	int err;
 
-	spin_lock(&vdpasim->lock);
+	mutex_lock(&vdpasim->mutex);
 
 	if (!vdpasim->running)
 		goto out;
@@ -264,7 +264,7 @@ static void vdpasim_net_work(struct vdpasim *vdpasim)
 	}
 
 out:
-	spin_unlock(&vdpasim->lock);
+	mutex_unlock(&vdpasim->mutex);
 
 	u64_stats_update_begin(&net->tx_stats.syncp);
 	net->tx_stats.pkts += tx_pkts;
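Note that iommu_lock stays a spinlock and is still handed to vringh (and so taken) while the new mutex is held, for example in vdpasim_set_group_asid(). Under the usual kernel locking rules that is the valid nesting order: the sleeping lock outside, the spinlock inside, and nothing that can sleep while the spinlock is held. A minimal sketch, illustrative only and not code from this patch:

mutex_lock(&vdpasim->mutex);            /* sleeping lock, taken first */

spin_lock(&vdpasim->iommu_lock);        /* spinlock nested inside: OK */
/* ... short, non-sleeping IOTLB updates ... */
spin_unlock(&vdpasim->iommu_lock);

kthread_flush_work(&vdpasim->work);     /* may sleep: legal, spinlock released */
mutex_unlock(&vdpasim->mutex);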