vfio: Simplify vfio_create_group()
The vfio.group_lock is now only used to serialize vfio_group creation and
destruction; we don't need a micro-optimization of searching, unlocking,
then allocating and searching again. Just hold the lock the whole time.

Grabbed from: https://lore.kernel.org/kvm/20220922152338.2a2238fe.alex.williamson@redhat.com/

Link: https://lore.kernel.org/r/20221201145535.589687-2-yi.l.liu@intel.com
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Lixiao Yang <lixiao.yang@intel.com>
Tested-by: Yu He <yu.he@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
commit f794eec86c
parent 90337f526c
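For context on what the patch removes: the old code looked up the group under
vfio.group_lock, dropped the lock, allocated a new group, then re-took the lock
and re-checked for a racing creator (the "Did we race creating this group?"
path). Since the lock now only serializes group creation and destruction, the
caller can simply hold it across both the lookup and the creation. The sketch
below distills that pattern in plain C with a pthread mutex; the names (entry,
find_entry, find_or_create_entry, table_lock) are illustrative only and are not
taken from the vfio code.

/* Minimal sketch of "hold the lock across lookup and creation".
 * Illustrative only -- not the vfio data structures or API.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int key;
	struct entry *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table;

/* Caller must hold table_lock (the analogue of lockdep_assert_held()). */
static struct entry *find_entry(int key)
{
	struct entry *e;

	for (e = table; e; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}

/*
 * One critical section covers both the search and the insertion, so a
 * racing creator cannot slip in between them and no re-check is needed.
 */
static struct entry *find_or_create_entry(int key)
{
	struct entry *e;

	pthread_mutex_lock(&table_lock);
	e = find_entry(key);
	if (!e) {
		e = calloc(1, sizeof(*e));
		if (e) {
			e->key = key;
			e->next = table;
			table = e;
		}
	}
	pthread_mutex_unlock(&table_lock);
	return e;
}

int main(void)
{
	struct entry *a = find_or_create_entry(7);	/* created */
	struct entry *b = find_or_create_entry(7);	/* found, same object */

	printf("same entry: %s\n", a == b ? "yes" : "no");
	return 0;
}

In the patch itself, the lock is taken by the callers (vfio_group_find_or_alloc()
and vfio_noiommu_group_alloc()) around vfio_create_group(), which lets the
err_unlock re-check path inside vfio_create_group() go away entirely.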
@@ -143,10 +143,12 @@ EXPORT_SYMBOL_GPL(vfio_device_set_open_count);
  * Group objects - create, release, get, put, search
  */
 static struct vfio_group *
-__vfio_group_get_from_iommu(struct iommu_group *iommu_group)
+vfio_group_get_from_iommu(struct iommu_group *iommu_group)
 {
 	struct vfio_group *group;
 
+	lockdep_assert_held(&vfio.group_lock);
+
 	/*
 	 * group->iommu_group from the vfio.group_list cannot be NULL
 	 * under the vfio.group_lock.
@@ -160,17 +162,6 @@ __vfio_group_get_from_iommu(struct iommu_group *iommu_group)
 	return NULL;
 }
 
-static struct vfio_group *
-vfio_group_get_from_iommu(struct iommu_group *iommu_group)
-{
-	struct vfio_group *group;
-
-	mutex_lock(&vfio.group_lock);
-	group = __vfio_group_get_from_iommu(iommu_group);
-	mutex_unlock(&vfio.group_lock);
-	return group;
-}
-
 static void vfio_group_release(struct device *dev)
 {
 	struct vfio_group *group = container_of(dev, struct vfio_group, dev);
@@ -225,6 +216,8 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
 	struct vfio_group *ret;
 	int err;
 
+	lockdep_assert_held(&vfio.group_lock);
+
 	group = vfio_group_alloc(iommu_group, type);
 	if (IS_ERR(group))
 		return group;
@@ -237,26 +230,16 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
 		goto err_put;
 	}
 
-	mutex_lock(&vfio.group_lock);
-
-	/* Did we race creating this group? */
-	ret = __vfio_group_get_from_iommu(iommu_group);
-	if (ret)
-		goto err_unlock;
-
 	err = cdev_device_add(&group->cdev, &group->dev);
 	if (err) {
 		ret = ERR_PTR(err);
-		goto err_unlock;
+		goto err_put;
 	}
 
 	list_add(&group->vfio_next, &vfio.group_list);
 
-	mutex_unlock(&vfio.group_lock);
 	return group;
 
-err_unlock:
-	mutex_unlock(&vfio.group_lock);
 err_put:
 	put_device(&group->dev);
 	return ret;
@@ -467,7 +450,9 @@ static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev,
 	if (ret)
 		goto out_put_group;
 
+	mutex_lock(&vfio.group_lock);
 	group = vfio_create_group(iommu_group, type);
+	mutex_unlock(&vfio.group_lock);
 	if (IS_ERR(group)) {
 		ret = PTR_ERR(group);
 		goto out_remove_device;
@@ -516,9 +501,11 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
 		return ERR_PTR(-EINVAL);
 	}
 
+	mutex_lock(&vfio.group_lock);
 	group = vfio_group_get_from_iommu(iommu_group);
 	if (!group)
 		group = vfio_create_group(iommu_group, VFIO_IOMMU);
+	mutex_unlock(&vfio.group_lock);
 
 	/* The vfio_group holds a reference to the iommu_group */
 	iommu_group_put(iommu_group);