drm/i915/gvt: Revert "drm/i915/gvt: Fix possible recursive locking issue"

This reverts commit 62d02fd1f8.

The rwsem recursive locking trace should not be worked around on the
kvmgt side with a workqueue; it is an issue that should be fixed in
VFIO. So this commit should be reverted.
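
For reference, the recursion the reverted commit was deferring looks
like this (call chain reconstructed from that commit's rationale; the
rwsem name container->group_lock is an assumption about the VFIO
internals involved):

	ioctl(VFIO_IOMMU_UNMAP_DMA)
	  down_read(&container->group_lock)       /* taken by the VFIO ioctl path */
	    -> vfio_iommu_type1 DMA unmap
	      -> intel_vgpu_iommu_notifier()
	        -> gvt_cache_remove()
	          -> vfio_unpin_pages()
	            -> down_read(&container->group_lock)  /* same rwsem, same task */

Deferring vfio_unpin_pages() to a workqueue moved the second
down_read() out of the ioctl context; with this revert the unpin
happens inline again and the lockdep trace returns until VFIO stops
holding the lock across the notifier.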

Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: stable@vger.kernel.org # v4.10+
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Author: Chuanxiao Dong <chuanxiao.dong@intel.com>
Date: 2017-07-07 13:21:52 +08:00 (committed by Zhenyu Wang)
parent 3364bf5fd0
commit 08673c3e27
2 changed files with 10 additions and 48 deletions

drivers/gpu/drm/i915/gvt/gvt.h

@@ -182,9 +182,6 @@ struct intel_vgpu {
 		struct kvm *kvm;
 		struct work_struct release_work;
 		atomic_t released;
-		struct work_struct unpin_work;
-		spinlock_t unpin_lock; /* To protect unpin_list */
-		struct list_head unpin_list;
 	} vdev;
 #endif
 };

drivers/gpu/drm/i915/gvt/kvmgt.c

@@ -78,7 +78,6 @@ struct gvt_dma {
 	struct rb_node node;
 	gfn_t gfn;
 	unsigned long iova;
-	struct list_head list;
 };
 
 static inline bool handle_valid(unsigned long handle)
@@ -167,7 +166,6 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 
 	new->gfn = gfn;
 	new->iova = iova;
-	INIT_LIST_HEAD(&new->list);
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while (*link) {
@@ -199,52 +197,26 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
 	kfree(entry);
 }
 
-static void intel_vgpu_unpin_work(struct work_struct *work)
+static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
 {
-	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
-					       vdev.unpin_work);
 	struct device *dev = mdev_dev(vgpu->vdev.mdev);
 	struct gvt_dma *this;
-	unsigned long gfn;
-
-	for (;;) {
-		spin_lock(&vgpu->vdev.unpin_lock);
-		if (list_empty(&vgpu->vdev.unpin_list)) {
-			spin_unlock(&vgpu->vdev.unpin_lock);
-			break;
-		}
-		this = list_first_entry(&vgpu->vdev.unpin_list,
-					struct gvt_dma, list);
-		list_del(&this->list);
-		spin_unlock(&vgpu->vdev.unpin_lock);
-
-		gfn = this->gfn;
-		vfio_unpin_pages(dev, &gfn, 1);
-		kfree(this);
-	}
-}
-
-static bool gvt_cache_mark_remove(struct intel_vgpu *vgpu, gfn_t gfn)
-{
-	struct gvt_dma *this;
+	unsigned long g1;
+	int rc;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 	this = __gvt_cache_find(vgpu, gfn);
 	if (!this) {
 		mutex_unlock(&vgpu->vdev.cache_lock);
-		return false;
+		return;
 	}
 
+	g1 = gfn;
 	gvt_dma_unmap_iova(vgpu, this->iova);
-	/* remove this from rb tree */
-	rb_erase(&this->node, &vgpu->vdev.cache);
+	rc = vfio_unpin_pages(dev, &g1, 1);
+	WARN_ON(rc != 1);
+	__gvt_cache_remove_entry(vgpu, this);
 	mutex_unlock(&vgpu->vdev.cache_lock);
-
-	/* put this to the unpin_list */
-	spin_lock(&vgpu->vdev.unpin_lock);
-	list_move_tail(&this->list, &vgpu->vdev.unpin_list);
-	spin_unlock(&vgpu->vdev.unpin_lock);
-	return true;
 }
 
 static void gvt_cache_init(struct intel_vgpu *vgpu)
@@ -485,9 +457,6 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	}
 
 	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
-	INIT_WORK(&vgpu->vdev.unpin_work, intel_vgpu_unpin_work);
-	spin_lock_init(&vgpu->vdev.unpin_lock);
-	INIT_LIST_HEAD(&vgpu->vdev.unpin_list);
 
 	vgpu->vdev.mdev = mdev;
 	mdev_set_drvdata(mdev, vgpu);
@@ -517,7 +486,6 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 	struct intel_vgpu *vgpu = container_of(nb,
 					struct intel_vgpu,
 					vdev.iommu_notifier);
-	bool sched_unmap = false;
 
 	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
 		struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -527,10 +495,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 		end_gfn = gfn + unmap->size / PAGE_SIZE;
 
 		while (gfn < end_gfn)
-			sched_unmap |= gvt_cache_mark_remove(vgpu, gfn++);
-
-		if (sched_unmap)
-			schedule_work(&vgpu->vdev.unpin_work);
+			gvt_cache_remove(vgpu, gfn++);
 	}
 
 	return NOTIFY_OK;