iommu/vt-d: Delegate the identity domain to upper layer

This allows the generic IOMMU layer to allocate an identity domain
and attach it to a device; the identity domain is thereby delegated
to the upper layer. As a side effect, iommu_identity_mapping can no
longer be used to check for the existence of identity domains.

Signed-off-by: James Sewart <jamessewart@arista.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author: Lu Baolu <baolu.lu@linux.intel.com>
Date: 2019-05-25 13:41:27 +08:00, committed by Joerg Roedel
commit 4de354ec2f, parent f273a453b7
1 changed file with 58 additions and 32 deletions
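For context, the sketch below (not part of this patch) shows roughly how the
generic IOMMU layer is expected to consume the change: it asks the driver for
an IOMMU_DOMAIN_IDENTITY domain through the ->domain_alloc() callback, for
which intel_iommu_domain_alloc() now returns si_domain, and then attaches the
device to it. The helper name example_attach_identity() and its exact flow are
illustrative assumptions; the callbacks and fields follow the v5.2-era
struct iommu_ops and struct iommu_domain.

/*
 * Illustrative sketch only -- not part of this patch.  Approximates what
 * the IOMMU core's default-domain path does once a driver handles
 * IOMMU_DOMAIN_IDENTITY in ->domain_alloc().  The helper name is made up.
 */
static int example_attach_identity(struct device *dev,
				   const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	/* With this patch, the Intel driver returns the shared si_domain here. */
	domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
	if (!domain)
		return -ENOMEM;

	domain->type = IOMMU_DOMAIN_IDENTITY;
	domain->ops = ops;

	/* Program the device's context to use the identity (1:1) mapping. */
	return ops->attach_dev(domain, dev);
}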


@@ -350,6 +350,7 @@ static void domain_context_clear(struct intel_iommu *iommu,
 				 struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
 			       struct intel_iommu *iommu);
+static bool device_is_rmrr_locked(struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
@@ -2808,7 +2809,9 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_init(int hw)
 {
-	int nid, ret;
+	struct dmar_rmrr_unit *rmrr;
+	struct device *dev;
+	int i, nid, ret;
 
 	si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
 	if (!si_domain)
@@ -2819,8 +2822,6 @@ static int __init si_domain_init(int hw)
 		return -EFAULT;
 	}
 
-	pr_debug("Identity mapping domain allocated\n");
-
 	if (hw)
 		return 0;
@@ -2836,6 +2837,31 @@ static int __init si_domain_init(int hw)
 		}
 	}
 
+	/*
+	 * Normally we use DMA domains for devices which have RMRRs. But we
+	 * lose this requirement for graphics and USB devices. Identity map
+	 * the RMRRs for graphics and USB devices so that they can use the
+	 * si_domain.
+	 */
+	for_each_rmrr_units(rmrr) {
+		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+					  i, dev) {
+			unsigned long long start = rmrr->base_address;
+			unsigned long long end = rmrr->end_address;
+
+			if (device_is_rmrr_locked(dev))
+				continue;
+
+			if (WARN_ON(end < start ||
+				    end >> agaw_to_width(si_domain->agaw)))
+				continue;
+
+			ret = iommu_domain_identity_map(si_domain, start, end);
+			if (ret)
+				return ret;
+		}
+	}
+
 	return 0;
 }
@@ -2843,9 +2869,6 @@ static int identity_mapping(struct device *dev)
 {
 	struct device_domain_info *info;
 
-	if (likely(!iommu_identity_mapping))
-		return 0;
-
 	info = dev->archdata.iommu;
 	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
 		return (info->domain == si_domain);
@@ -3431,11 +3454,9 @@ static int __init init_dmars(void)
 	check_tylersburg_isoch();
 
-	if (iommu_identity_mapping) {
-		ret = si_domain_init(hw_pass_through);
-		if (ret)
-			goto free_iommu;
-	}
+	ret = si_domain_init(hw_pass_through);
+	if (ret)
+		goto free_iommu;
 
 	/*
@@ -3628,9 +3649,6 @@ static bool iommu_need_mapping(struct device *dev)
 	if (iommu_dummy(dev))
 		return false;
 
-	if (!iommu_identity_mapping)
-		return true;
-
 	found = identity_mapping(dev);
 	if (found) {
 		if (iommu_should_identity_map(dev, 0))
@@ -5051,32 +5069,40 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 	struct dmar_domain *dmar_domain;
 	struct iommu_domain *domain;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
+	switch (type) {
+	case IOMMU_DOMAIN_UNMANAGED:
+		dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
+		if (!dmar_domain) {
+			pr_err("Can't allocate dmar_domain\n");
+			return NULL;
+		}
+		if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+			pr_err("Domain initialization failed\n");
+			domain_exit(dmar_domain);
+			return NULL;
+		}
+		domain_update_iommu_cap(dmar_domain);
 
-	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
-	if (!dmar_domain) {
-		pr_err("Can't allocate dmar_domain\n");
+		domain = &dmar_domain->domain;
+		domain->geometry.aperture_start = 0;
+		domain->geometry.aperture_end =
+				__DOMAIN_MAX_ADDR(dmar_domain->gaw);
+		domain->geometry.force_aperture = true;
+
+		return domain;
+	case IOMMU_DOMAIN_IDENTITY:
+		return &si_domain->domain;
+	default:
 		return NULL;
 	}
-	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
-		pr_err("Domain initialization failed\n");
-		domain_exit(dmar_domain);
-		return NULL;
-	}
-	domain_update_iommu_cap(dmar_domain);
 
-	domain = &dmar_domain->domain;
-	domain->geometry.aperture_start = 0;
-	domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
-	domain->geometry.force_aperture = true;
-
-	return domain;
+	return NULL;
 }
 
 static void intel_iommu_domain_free(struct iommu_domain *domain)
 {
-	domain_exit(to_dmar_domain(domain));
+	if (domain != &si_domain->domain)
+		domain_exit(to_dmar_domain(domain));
 }
 
 /*