iommu/vt-d: Add support for IOMMU default DMA mode build options
Make IOMMU_DEFAULT_LAZY the default when the INTEL_IOMMU config is set, as is the current behaviour.

Also delete the global flag intel_iommu_strict:

- In intel_iommu_setup(), call iommu_set_dma_strict(true) directly. Also remove the print, as iommu_subsys_init() prints the mode and we have already marked this param as deprecated.

- For the cap_caching_mode() check in intel_iommu_init(), call iommu_set_dma_strict(true) directly; also reword the accompanying print with a level downgrade and add the missing '\n'.

- For the Ironlake GPU, again call iommu_set_dma_strict(true) directly and keep the accompanying print.

[jpg: Remove intel_iommu_strict]
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/1626088340-5838-5-git-send-email-john.garry@huawei.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit d0e108b8e9
parent 712d8f2058
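For background on why the driver-local flag can go away: the IOMMU core already keeps a single strict/lazy flag, and iommu_set_dma_strict() lets drivers such as intel-iommu request strict (unbatched) IOTLB invalidation directly. The stand-alone C sketch below models that interaction; the names dma_strict, set_dma_strict() and the main() harness are illustrative assumptions, not the kernel's actual implementation.

/*
 * Minimal stand-alone sketch (not kernel code): the core owns one
 * strict/lazy flag and callers can only ask to tighten it. The real
 * kernel helper this loosely models is iommu_set_dma_strict().
 */
#include <stdbool.h>
#include <stdio.h>

static bool dma_strict;			/* false: lazy (batched) IOTLB flushing */

static void set_dma_strict(bool strict)
{
	/* every caller in this patch passes true, so the flag only tightens */
	if (strict)
		dma_strict = true;
}

int main(void)
{
	printf("before: %s\n", dma_strict ? "strict" : "lazy");
	set_dma_strict(true);	/* e.g. intel_iommu=strict, a caching-mode vIOMMU, or the Ironlake quirk */
	printf("after:  %s\n", dma_strict ? "strict" : "lazy");
	return 0;
}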
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -94,6 +94,7 @@ choice
 	prompt "IOMMU default DMA IOTLB invalidation mode"
 	depends on IOMMU_DMA
 
+	default IOMMU_DEFAULT_LAZY if INTEL_IOMMU
 	default IOMMU_DEFAULT_STRICT
 	help
 	  This option allows an IOMMU DMA IOTLB invalidation mode to be
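Within a Kconfig choice block, the first default whose condition evaluates true is the one pre-selected, so with INTEL_IOMMU=y the pre-selected mode becomes IOMMU_DEFAULT_LAZY, while IOMMU_DEFAULT_STRICT remains the fallback otherwise. The selected symbol then reaches C code as a CONFIG_* macro; the stand-alone sketch below shows roughly how such a build-time choice can seed a runtime flag. Everything apart from the CONFIG_IOMMU_DEFAULT_STRICT symbol name is an illustrative assumption rather than the kernel's exact code.

/*
 * Sketch only: a build-time Kconfig selection seeding a runtime default.
 * Compile with -DCONFIG_IOMMU_DEFAULT_STRICT to simulate the strict leg;
 * leaving it undefined simulates IOMMU_DEFAULT_LAZY (e.g. INTEL_IOMMU=y).
 */
#include <stdbool.h>
#include <stdio.h>

#ifdef CONFIG_IOMMU_DEFAULT_STRICT
static bool dma_strict = true;
#else
static bool dma_strict = false;
#endif

int main(void)
{
	printf("build-time default DMA IOTLB invalidation mode: %s\n",
	       dma_strict ? "strict" : "lazy");
	return 0;
}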
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -361,7 +361,6 @@ int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
 
 static int dmar_map_gfx = 1;
-static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int iommu_identity_mapping;
 static int iommu_skip_te_disable;
@@ -455,8 +454,7 @@ static int __init intel_iommu_setup(char *str)
 			iommu_dma_forcedac = true;
 		} else if (!strncmp(str, "strict", 6)) {
 			pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n");
-			pr_info("Disable batched IOTLB flush\n");
-			intel_iommu_strict = 1;
+			iommu_set_dma_strict(true);
 		} else if (!strncmp(str, "sp_off", 6)) {
 			pr_info("Disable supported super page\n");
 			intel_iommu_superpage = 0;
@@ -4394,9 +4392,9 @@ int __init intel_iommu_init(void)
 		 * is likely to be much lower than the overhead of synchronizing
 		 * the virtual and physical IOMMU page-tables.
 		 */
-		if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
-			pr_warn("IOMMU batching is disabled due to virtualization");
-			intel_iommu_strict = 1;
+		if (cap_caching_mode(iommu->cap)) {
+			pr_info_once("IOMMU batching disallowed due to virtualization\n");
+			iommu_set_dma_strict(true);
 		}
 		iommu_device_sysfs_add(&iommu->iommu, NULL,
 				       intel_iommu_groups,
@@ -4405,7 +4403,6 @@ int __init intel_iommu_init(void)
 	}
 	up_read(&dmar_global_lock);
 
-	iommu_set_dma_strict(intel_iommu_strict);
 	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 	if (si_domain && !hw_pass_through)
 		register_memory_notifier(&intel_iommu_memory_nb);
@@ -5715,8 +5712,8 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
 	} else if (dmar_map_gfx) {
 		/* we have to ensure the gfx device is idle before we flush */
 		pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
-		intel_iommu_strict = 1;
+		iommu_set_dma_strict(true);
 	}
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);