From ee2653bbe89df777e222ec1aa015c2ed7ea06b6b Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Tue, 1 Mar 2022 10:01:51 +0800
Subject: [PATCH] iommu/vt-d: Remove domain and devinfo mempool

The domain and devinfo memory blocks are only allocated during device
probe and released during remove. There's no hot-path context, hence
no need for memory pools.

Signed-off-by: Lu Baolu
Reviewed-by: Christoph Hellwig
Reviewed-by: Jason Gunthorpe
Link: https://lore.kernel.org/r/20220214025704.3184654-1-baolu.lu@linux.intel.com
Link: https://lore.kernel.org/r/20220301020159.633356-5-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 104 ++----------------------------------
 1 file changed, 5 insertions(+), 99 deletions(-)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 7c2b427bea3b..51db26d8606c 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -452,9 +452,6 @@ static int __init intel_iommu_setup(char *str)
 }
 __setup("intel_iommu=", intel_iommu_setup);
 
-static struct kmem_cache *iommu_domain_cache;
-static struct kmem_cache *iommu_devinfo_cache;
-
 void *alloc_pgtable_page(int node)
 {
 	struct page *page;
@@ -471,26 +468,6 @@ void free_pgtable_page(void *vaddr)
 	free_page((unsigned long)vaddr);
 }
 
-static inline void *alloc_domain_mem(void)
-{
-	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
-}
-
-static void free_domain_mem(void *vaddr)
-{
-	kmem_cache_free(iommu_domain_cache, vaddr);
-}
-
-static inline void * alloc_devinfo_mem(void)
-{
-	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
-}
-
-static inline void free_devinfo_mem(void *vaddr)
-{
-	kmem_cache_free(iommu_devinfo_cache, vaddr);
-}
-
 static inline int domain_type_is_si(struct dmar_domain *domain)
 {
 	return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
@@ -1885,11 +1862,10 @@ static struct dmar_domain *alloc_domain(unsigned int type)
 {
 	struct dmar_domain *domain;
 
-	domain = alloc_domain_mem();
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
 		return NULL;
 
-	memset(domain, 0, sizeof(*domain));
 	domain->nid = NUMA_NO_NODE;
 	if (first_level_by_default(type))
 		domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
@@ -1973,7 +1949,7 @@ static void domain_exit(struct dmar_domain *domain)
 		put_pages_list(&freelist);
 	}
 
-	free_domain_mem(domain);
+	kfree(domain);
 }
 
 /*
@@ -2558,7 +2534,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 	unsigned long flags;
 	int ret;
 
-	info = alloc_devinfo_mem();
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return NULL;
 
@@ -2574,13 +2550,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 		info->segment = pci_domain_nr(pdev->bus);
 	}
 
-	info->ats_supported = info->pasid_supported = info->pri_supported = 0;
-	info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
-	info->ats_qdep = 0;
 	info->dev = dev;
 	info->domain = domain;
 	info->iommu = iommu;
-	info->pasid_table = NULL;
 
 	if (dev && dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -2610,7 +2582,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 
 		if (ret) {
 			spin_unlock_irqrestore(&device_domain_lock, flags);
-			free_devinfo_mem(info);
+			kfree(info);
 			return NULL;
 		}
 
@@ -3343,65 +3315,6 @@ error:
 	return ret;
 }
 
-static inline int iommu_domain_cache_init(void)
-{
-	int ret = 0;
-
-	iommu_domain_cache = kmem_cache_create("iommu_domain",
-					 sizeof(struct dmar_domain),
-					 0,
-					 SLAB_HWCACHE_ALIGN,
-
-					 NULL);
-	if (!iommu_domain_cache) {
-		pr_err("Couldn't create iommu_domain cache\n");
-		ret = -ENOMEM;
-	}
-
-	return ret;
-}
-
-static inline int iommu_devinfo_cache_init(void)
-{
-	int ret = 0;
-
-	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
-					 sizeof(struct device_domain_info),
-					 0,
-					 SLAB_HWCACHE_ALIGN,
-					 NULL);
-	if (!iommu_devinfo_cache) {
-		pr_err("Couldn't create devinfo cache\n");
-		ret = -ENOMEM;
-	}
-
-	return ret;
-}
-
-static int __init iommu_init_mempool(void)
-{
-	int ret;
-
-	ret = iommu_domain_cache_init();
-	if (ret)
-		goto domain_error;
-
-	ret = iommu_devinfo_cache_init();
-	if (!ret)
-		return ret;
-
-	kmem_cache_destroy(iommu_domain_cache);
-domain_error:
-
-	return -ENOMEM;
-}
-
-static void __init iommu_exit_mempool(void)
-{
-	kmem_cache_destroy(iommu_devinfo_cache);
-	kmem_cache_destroy(iommu_domain_cache);
-}
-
 static void __init init_no_remapping_devices(void)
 {
 	struct dmar_drhd_unit *drhd;
@@ -4253,12 +4166,6 @@ int __init intel_iommu_init(void)
 	force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
 		    platform_optin_force_iommu();
 
-	if (iommu_init_mempool()) {
-		if (force_on)
-			panic("tboot: Failed to initialize iommu memory\n");
-		return -ENOMEM;
-	}
-
 	down_write(&dmar_global_lock);
 	if (dmar_table_init()) {
 		if (force_on)
@@ -4379,7 +4286,6 @@ int __init intel_iommu_init(void)
 out_free_dmar:
 	intel_iommu_free_dmars();
 	up_write(&dmar_global_lock);
-	iommu_exit_mempool();
 
 	return ret;
 }
@@ -4436,7 +4342,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 		domain_detach_iommu(domain, iommu);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	free_devinfo_mem(info);
+	kfree(info);
 }
 
 static void dmar_remove_one_dev_info(struct device *dev)