iommu/vt-d: Add device_block_translation() helper

If attaching a domain to a device fails, the IOMMU driver should bring
the device to the blocking DMA state. The upper layer is then expected
to recover it by attaching a new domain. Use device_block_translation()
in the error path of dev_attach to make this behavior explicit.
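
A minimal sketch of that error-path convention (do_attach_step() is a
hypothetical stand-in for any attach step that can fail; the real code
is in the diff below):

	/* Sketch only: do_attach_step() is a hypothetical placeholder. */
	static int attach_sketch(struct dmar_domain *domain, struct device *dev)
	{
		int ret = do_attach_step(domain, dev);

		if (ret) {
			/* Do not leave the device half attached; block its DMA. */
			device_block_translation(dev);
			return ret;
		}

		return 0;
	}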

The difference between device_block_translation() and the previous
dmar_remove_one_dev_info() is that, in scalable mode, it clears the
RID2PASID entry instead of the context entry. As a result, the call
that enables PCI capabilities is moved up into domain_add_dev_info().
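
For illustration, a condensed (not verbatim) view of the resulting
attach order in domain_add_dev_info(); setup_rid2pasid() is a
hypothetical stand-in for the RID2PASID setup call shown in the diff:

	if (sm_supported(iommu)) {
		ret = setup_rid2pasid(domain, dev);	/* hypothetical name */
		if (ret) {
			device_block_translation(dev);
			return ret;
		}
	}

	ret = domain_context_mapping(domain, dev);
	if (ret) {
		device_block_translation(dev);
		return ret;
	}

	/* PCI capabilities are enabled only after setup has succeeded. */
	iommu_enable_pci_caps(info);
	return 0;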

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20221118132451.114406-3-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author:    Lu Baolu <baolu.lu@linux.intel.com>
Date:      2022-11-22 08:29:44 +08:00
Committer: Joerg Roedel
Parent:    ec62b44241
Commit:    c7be17c290
1 file changed, 38 insertions(+), 6 deletions(-)


@@ -277,7 +277,7 @@ static LIST_HEAD(dmar_satc_units);
 #define for_each_rmrr_units(rmrr) \
 	list_for_each_entry(rmrr, &dmar_rmrr_units, list)
 
-static void dmar_remove_one_dev_info(struct device *dev);
+static void device_block_translation(struct device *dev);
 
 int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
 int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
@@ -1400,7 +1400,7 @@ static void iommu_enable_pci_caps(struct device_domain_info *info)
 {
 	struct pci_dev *pdev;
 
-	if (!info || !dev_is_pci(info->dev))
+	if (!dev_is_pci(info->dev))
 		return;
 
 	pdev = to_pci_dev(info->dev);
@@ -2045,7 +2045,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	} else {
 		iommu_flush_write_buffer(iommu);
 	}
-	iommu_enable_pci_caps(info);
 
 	ret = 0;
 
@@ -2487,7 +2486,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 						     dev, PASID_RID2PASID);
 		if (ret) {
 			dev_err(dev, "Setup RID2PASID failed\n");
-			dmar_remove_one_dev_info(dev);
+			device_block_translation(dev);
 			return ret;
 		}
 	}
@@ -2495,10 +2494,12 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 	ret = domain_context_mapping(domain, dev);
 	if (ret) {
 		dev_err(dev, "Domain context map failed\n");
-		dmar_remove_one_dev_info(dev);
+		device_block_translation(dev);
 		return ret;
 	}
 
+	iommu_enable_pci_caps(info);
+
 	return 0;
 }
 
@@ -4109,6 +4110,37 @@ static void dmar_remove_one_dev_info(struct device *dev)
 	info->domain = NULL;
 }
 
+/*
+ * Clear the page table pointer in context or pasid table entries so that
+ * all DMA requests without PASID from the device are blocked. If the page
+ * table has been set, clean up the data structures.
+ */
+static void device_block_translation(struct device *dev)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;
+	unsigned long flags;
+
+	iommu_disable_dev_iotlb(info);
+	if (!dev_is_real_dma_subdevice(dev)) {
+		if (sm_supported(iommu))
+			intel_pasid_tear_down_entry(iommu, dev,
+						    PASID_RID2PASID, false);
+		else
+			domain_context_clear(info);
+	}
+
+	if (!info->domain)
+		return;
+
+	spin_lock_irqsave(&info->domain->lock, flags);
+	list_del(&info->link);
+	spin_unlock_irqrestore(&info->domain->lock, flags);
+
+	domain_detach_iommu(info->domain, iommu);
+	info->domain = NULL;
+}
+
 static int md_domain_init(struct dmar_domain *domain, int guest_width)
 {
 	int adjust_width;
@@ -4232,7 +4264,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 		struct device_domain_info *info = dev_iommu_priv_get(dev);
 
 		if (info->domain)
-			dmar_remove_one_dev_info(dev);
+			device_block_translation(dev);
 	}
 
 	ret = prepare_domain_attach_device(domain, dev);