vhost_vdpa: remove unnecessary spin_lock in vhost_vring_call
This commit removes the unnecessary spin_lock in vhost_vring_call and related operations, since the irq offloading state is only manipulated in the vhost_vdpa ioctl code path, which is already protected by the dev mutex and the vq mutex.

Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
Link: https://lore.kernel.org/r/20200909065234.3313-1-lingshan.zhu@intel.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
parent 5745bcfbbf
commit 86e182fe12
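The locking argument above is that every access to vq->call_ctx already happens with an outer mutex held, so the inner spinlock protects nothing. As a rough illustration of that pattern, here is a minimal userspace analogy; the struct and function names below are invented for this sketch and are not part of the vhost code:

#include <pthread.h>
#include <stdio.h>

struct vring_call {
        int ctx;                      /* stands in for the eventfd ctx / producer state */
};

struct fake_dev {
        pthread_mutex_t mutex;        /* plays the role of the vhost dev/vq mutexes */
        struct vring_call call_ctx;
};

/*
 * Every "ioctl"-style entry point takes the mutex first, so the update to
 * call_ctx below is already serialized; an inner spinlock would add nothing.
 */
static void set_vring_call(struct fake_dev *d, int new_ctx)
{
        pthread_mutex_lock(&d->mutex);
        d->call_ctx.ctx = new_ctx;    /* analogous to swap(ctx, vq->call_ctx.ctx) */
        pthread_mutex_unlock(&d->mutex);
}

int main(void)
{
        struct fake_dev d = { .mutex = PTHREAD_MUTEX_INITIALIZER };

        set_vring_call(&d, 42);
        printf("call_ctx.ctx = %d\n", d.call_ctx.ctx);
        return 0;
}

In the patch below, the outer lock is the dev mutex taken by the vhost_vdpa ioctl handler (plus the vq mutex in vhost_vring_ioctl), which is why the ctx_lock spinlock being removed is redundant.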
@@ -96,26 +96,20 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
                 return;
 
         irq = ops->get_vq_irq(vdpa, qid);
-        spin_lock(&vq->call_ctx.ctx_lock);
         irq_bypass_unregister_producer(&vq->call_ctx.producer);
-        if (!vq->call_ctx.ctx || irq < 0) {
-                spin_unlock(&vq->call_ctx.ctx_lock);
+        if (!vq->call_ctx.ctx || irq < 0)
                 return;
-        }
 
         vq->call_ctx.producer.token = vq->call_ctx.ctx;
         vq->call_ctx.producer.irq = irq;
         ret = irq_bypass_register_producer(&vq->call_ctx.producer);
-        spin_unlock(&vq->call_ctx.ctx_lock);
 }
 
 static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
 {
         struct vhost_virtqueue *vq = &v->vqs[qid];
 
-        spin_lock(&vq->call_ctx.ctx_lock);
         irq_bypass_unregister_producer(&vq->call_ctx.producer);
-        spin_unlock(&vq->call_ctx.ctx_lock);
 }
 
 static void vhost_vdpa_reset(struct vhost_vdpa *v)
@@ -302,7 +302,6 @@ static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
 {
         call_ctx->ctx = NULL;
         memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
-        spin_lock_init(&call_ctx->ctx_lock);
 }
 
 static void vhost_vq_reset(struct vhost_dev *dev,
@@ -1650,9 +1649,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
                         break;
                 }
 
-                spin_lock(&vq->call_ctx.ctx_lock);
                 swap(ctx, vq->call_ctx.ctx);
-                spin_unlock(&vq->call_ctx.ctx_lock);
                 break;
         case VHOST_SET_VRING_ERR:
                 if (copy_from_user(&f, argp, sizeof f)) {
@@ -64,7 +64,6 @@ enum vhost_uaddr_type {
 struct vhost_vring_call {
         struct eventfd_ctx *ctx;
         struct irq_bypass_producer producer;
-        spinlock_t ctx_lock;
 };
 
 /* The virtqueue structure describes a queue attached to a device. */
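For reference, after this change struct vhost_vring_call (as shown in the hunk above) carries only the eventfd context and the irq bypass producer:

struct vhost_vring_call {
        struct eventfd_ctx *ctx;
        struct irq_bypass_producer producer;
};

with every update to it serialized by the mutexes taken on the ioctl path.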