vfio/iommu_type1: remove the "external" domain

The external_domain concept is rather misleading and not actually needed.
Replace it with a list of emulated groups in struct vfio_iommu.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20210924155705.4258-15-hch@lst.de
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Christoph Hellwig 2021-09-24 17:57:04 +02:00 committed by Alex Williamson
parent 65cdbf1063
commit 296e505bad
1 changed file with 54 additions and 66 deletions
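Before the diff, a brief illustration of the bookkeeping change may help. The sketch below is a self-contained userspace mock, not the kernel code: the list helpers, the placeholder struct iommu_group, and find_emulated_group() are simplified stand-ins for <linux/list.h>, the opaque kernel type, and vfio_iommu_find_iommu_group() respectively. It shows how the lookup that previously special-cased iommu->external_domain becomes a plain walk of the new iommu->emulated_iommu_groups list.

/* Simplified stand-ins for the kernel's <linux/list.h> helpers. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_head_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *item, struct list_head *head)
{
	item->next = head->next;
	item->prev = head;
	head->next->prev = item;
	head->next = item;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct iommu_group { int id; };		/* placeholder; opaque in the kernel */

struct vfio_iommu_group {
	struct list_head next;
	struct iommu_group *iommu_group;
	bool pinned_page_dirty_scope;
};

struct vfio_iommu {
	struct list_head domain_list;
	/* struct vfio_domain *external_domain;   removed by this patch */
	struct list_head emulated_iommu_groups;	/* added by this patch */
};

/* After the patch, finding an emulated group is a plain list walk. */
static struct vfio_iommu_group *
find_emulated_group(struct vfio_iommu *iommu, struct iommu_group *iommu_group)
{
	struct list_head *pos;

	for (pos = iommu->emulated_iommu_groups.next;
	     pos != &iommu->emulated_iommu_groups; pos = pos->next) {
		struct vfio_iommu_group *group =
			container_of(pos, struct vfio_iommu_group, next);

		if (group->iommu_group == iommu_group)
			return group;
	}
	return NULL;
}

int main(void)
{
	struct iommu_group ig = { .id = 1 };
	struct vfio_iommu_group grp = { .iommu_group = &ig,
					.pinned_page_dirty_scope = true };
	struct vfio_iommu iommu;

	list_head_init(&iommu.domain_list);
	list_head_init(&iommu.emulated_iommu_groups);

	/* VFIO_EMULATED_IOMMU attach: just link the group into the list. */
	list_add(&grp.next, &iommu.emulated_iommu_groups);

	printf("found: %d\n", find_emulated_group(&iommu, &ig) != NULL);
	return 0;
}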


@@ -65,7 +65,6 @@ MODULE_PARM_DESC(dma_entry_limit,
struct vfio_iommu {
struct list_head domain_list;
struct list_head iova_list;
struct vfio_domain *external_domain; /* domain for external user */
struct mutex lock;
struct rb_root dma_list;
struct blocking_notifier_head notifier;
@@ -78,6 +77,7 @@ struct vfio_iommu {
bool nesting;
bool dirty_page_tracking;
bool container_open;
struct list_head emulated_iommu_groups;
};
struct vfio_domain {
@@ -1892,8 +1892,8 @@ static struct vfio_iommu_group*
vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
struct iommu_group *iommu_group)
{
struct vfio_iommu_group *group;
struct vfio_domain *domain;
struct vfio_iommu_group *group = NULL;
list_for_each_entry(domain, &iommu->domain_list, next) {
group = find_iommu_group(domain, iommu_group);
@@ -1901,10 +1901,10 @@ vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
return group;
}
if (iommu->external_domain)
group = find_iommu_group(iommu->external_domain, iommu_group);
list_for_each_entry(group, &iommu->emulated_iommu_groups, next)
if (group->iommu_group == iommu_group)
return group;
return NULL;
}
static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
@@ -2163,61 +2163,52 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
struct vfio_iommu_group *group;
struct vfio_domain *domain, *d;
struct bus_type *bus = NULL;
int ret;
bool resv_msi, msi_remap;
phys_addr_t resv_msi_base = 0;
struct iommu_domain_geometry *geo;
LIST_HEAD(iova_copy);
LIST_HEAD(group_resv_regions);
int ret = -EINVAL;
mutex_lock(&iommu->lock);
/* Check for duplicates */
if (vfio_iommu_find_iommu_group(iommu, iommu_group)) {
mutex_unlock(&iommu->lock);
return -EINVAL;
}
if (vfio_iommu_find_iommu_group(iommu, iommu_group))
goto out_unlock;
group = kzalloc(sizeof(*group), GFP_KERNEL);
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!group || !domain) {
ret = -ENOMEM;
goto out_free;
}
group = kzalloc(sizeof(*group), GFP_KERNEL);
if (!group)
goto out_unlock;
group->iommu_group = iommu_group;
/* Determine bus_type in order to allocate a domain */
ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
if (ret)
goto out_free;
if (type == VFIO_EMULATED_IOMMU) {
if (!iommu->external_domain) {
INIT_LIST_HEAD(&domain->group_list);
iommu->external_domain = domain;
} else {
kfree(domain);
}
list_add(&group->next, &iommu->external_domain->group_list);
list_add(&group->next, &iommu->emulated_iommu_groups);
/*
* Non-iommu backed group cannot dirty memory directly, it can
* An emulated IOMMU group cannot dirty memory directly, it can
* only use interfaces that provide dirty tracking.
* The iommu scope can only be promoted with the addition of a
* dirty tracking group.
*/
group->pinned_page_dirty_scope = true;
mutex_unlock(&iommu->lock);
return 0;
ret = 0;
goto out_unlock;
}
domain->domain = iommu_domain_alloc(bus);
if (!domain->domain) {
/* Determine bus_type in order to allocate a domain */
ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
if (ret)
goto out_free_group;
ret = -ENOMEM;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
goto out_free_group;
ret = -EIO;
goto out_free;
}
domain->domain = iommu_domain_alloc(bus);
if (!domain->domain)
goto out_free_domain;
if (iommu->nesting) {
ret = iommu_enable_nesting(domain->domain);
@@ -2344,9 +2335,11 @@ out_domain:
iommu_domain_free(domain->domain);
vfio_iommu_iova_free(&iova_copy);
vfio_iommu_resv_free(&group_resv_regions);
out_free:
out_free_domain:
kfree(domain);
out_free_group:
kfree(group);
out_unlock:
mutex_unlock(&iommu->lock);
return ret;
}
@@ -2471,26 +2464,20 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
LIST_HEAD(iova_copy);
mutex_lock(&iommu->lock);
if (iommu->external_domain) {
group = find_iommu_group(iommu->external_domain, iommu_group);
if (group) {
list_for_each_entry(group, &iommu->emulated_iommu_groups, next) {
if (group->iommu_group != iommu_group)
continue;
update_dirty_scope = !group->pinned_page_dirty_scope;
list_del(&group->next);
kfree(group);
if (list_empty(&iommu->external_domain->group_list)) {
if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
if (list_empty(&iommu->emulated_iommu_groups) &&
!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
WARN_ON(iommu->notifier.head);
vfio_iommu_unmap_unpin_all(iommu);
}
kfree(iommu->external_domain);
iommu->external_domain = NULL;
}
goto detach_group_done;
}
}
/*
* Get a copy of iova list. This will be used to update
@@ -2517,7 +2504,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
*/
if (list_empty(&domain->group_list)) {
if (list_is_singular(&iommu->domain_list)) {
if (!iommu->external_domain) {
if (list_empty(&iommu->emulated_iommu_groups)) {
WARN_ON(iommu->notifier.head);
vfio_iommu_unmap_unpin_all(iommu);
} else {
@@ -2582,23 +2569,22 @@ static void *vfio_iommu_type1_open(unsigned long arg)
BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
init_waitqueue_head(&iommu->vaddr_wait);
iommu->pgsize_bitmap = PAGE_MASK;
INIT_LIST_HEAD(&iommu->emulated_iommu_groups);
return iommu;
}
static void vfio_release_domain(struct vfio_domain *domain, bool external)
static void vfio_release_domain(struct vfio_domain *domain)
{
struct vfio_iommu_group *group, *group_tmp;
list_for_each_entry_safe(group, group_tmp,
&domain->group_list, next) {
if (!external)
iommu_detach_group(domain->domain, group->iommu_group);
list_del(&group->next);
kfree(group);
}
if (!external)
iommu_domain_free(domain->domain);
}
@@ -2606,17 +2592,19 @@ static void vfio_iommu_type1_release(void *iommu_data)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_domain *domain, *domain_tmp;
struct vfio_iommu_group *group, *next_group;
if (iommu->external_domain) {
vfio_release_domain(iommu->external_domain, true);
kfree(iommu->external_domain);
list_for_each_entry_safe(group, next_group,
&iommu->emulated_iommu_groups, next) {
list_del(&group->next);
kfree(group);
}
vfio_iommu_unmap_unpin_all(iommu);
list_for_each_entry_safe(domain, domain_tmp,
&iommu->domain_list, next) {
vfio_release_domain(domain, false);
vfio_release_domain(domain);
list_del(&domain->next);
kfree(domain);
}