iommu: iova: Move iova cache management to the iova library

This is necessary to separate intel-iommu from the iova library.

Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Sakari Ailus 2015-09-29 18:07:02 -07:00 committed by Greg Kroah-Hartman
parent 19f7767e29
commit 09e8b485ee
3 changed files with 54 additions and 39 deletions

View File

@@ -3711,7 +3711,7 @@ static inline int iommu_devinfo_cache_init(void)
static int __init iommu_init_mempool(void) static int __init iommu_init_mempool(void)
{ {
int ret; int ret;
-	ret = iommu_iova_cache_init();
+	ret = iova_cache_get();
if (ret) if (ret)
return ret; return ret;
@@ -3725,7 +3725,7 @@ static int __init iommu_init_mempool(void)
kmem_cache_destroy(iommu_domain_cache); kmem_cache_destroy(iommu_domain_cache);
domain_error: domain_error:
-	iommu_iova_cache_destroy();
+	iova_cache_put();
return -ENOMEM; return -ENOMEM;
} }
@@ -3734,7 +3734,7 @@ static void __init iommu_exit_mempool(void)
{ {
kmem_cache_destroy(iommu_devinfo_cache); kmem_cache_destroy(iommu_devinfo_cache);
kmem_cache_destroy(iommu_domain_cache); kmem_cache_destroy(iommu_domain_cache);
-	iommu_iova_cache_destroy();
+	iova_cache_put();
} }
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)

View File

@@ -20,40 +20,6 @@
#include <linux/iova.h> #include <linux/iova.h>
#include <linux/slab.h> #include <linux/slab.h>
/* Slab cache backing struct iova allocations (single-user, pre-refcount form). */
static struct kmem_cache *iommu_iova_cache;

/*
 * iommu_iova_cache_init - create the slab cache used by alloc_iova_mem().
 *
 * Returns 0 on success or -ENOMEM if the cache could not be created.
 */
int iommu_iova_cache_init(void)
{
	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova), 0,
					     SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_iova_cache) {
		pr_err("Couldn't create iova cache\n");
		return -ENOMEM;
	}

	return 0;
}
/* Tear down the iova slab cache created by iommu_iova_cache_init(). */
void iommu_iova_cache_destroy(void)
{
/* kmem_cache_destroy() is a no-op on a NULL cache pointer. */
kmem_cache_destroy(iommu_iova_cache);
}
/*
 * Allocate one struct iova from the slab cache.  GFP_ATOMIC means the
 * allocation will not sleep; returns NULL on failure.  The cache must
 * have been created via iommu_iova_cache_init() first.
 */
struct iova *alloc_iova_mem(void)
{
return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}
/* Return a struct iova obtained from alloc_iova_mem() to the slab cache. */
void free_iova_mem(struct iova *iova)
{
kmem_cache_free(iommu_iova_cache, iova);
}
void void
init_iova_domain(struct iova_domain *iovad, unsigned long granule, init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn, unsigned long pfn_32bit) unsigned long start_pfn, unsigned long pfn_32bit)
@@ -242,6 +208,55 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
rb_insert_color(&iova->node, root); rb_insert_color(&iova->node, root);
} }
/* Shared slab cache for struct iova, refcounted so that multiple users
 * of the iova library can create/destroy it independently. */
static struct kmem_cache *iova_cache;
/* Number of iova_cache_get() calls not yet balanced by iova_cache_put(). */
static unsigned int iova_cache_users;
/* Serializes cache creation/destruction against the user count. */
static DEFINE_MUTEX(iova_cache_mutex);

/*
 * Allocate one struct iova from the shared slab cache.  GFP_ATOMIC means
 * the allocation will not sleep; returns NULL on failure.  The cache must
 * exist, i.e. the caller must hold a reference from iova_cache_get().
 */
struct iova *alloc_iova_mem(void)
{
return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);
/* Return a struct iova obtained from alloc_iova_mem() to the shared cache. */
void free_iova_mem(struct iova *iova)
{
kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);
/*
 * iova_cache_get - take a reference on the shared iova slab cache.
 *
 * Creates the "iommu_iova" kmem cache on the first call; later callers
 * only bump the user count.  Every successful call must be balanced by
 * a matching iova_cache_put().
 *
 * Returns 0 on success or -ENOMEM if the cache could not be created.
 */
int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			/* pr_err for consistency with the rest of this file's logging. */
			pr_err("Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
/*
 * iova_cache_put - drop a reference on the shared iova slab cache.
 *
 * Destroys the cache when the last user goes away.  Calling this without
 * a matching iova_cache_get() triggers a WARN and is otherwise a no-op.
 */
void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!WARN_ON(!iova_cache_users)) {
		if (--iova_cache_users == 0)
			kmem_cache_destroy(iova_cache);
	}
	mutex_unlock(&iova_cache_mutex);
}
/** /**
* alloc_iova - allocates an iova * alloc_iova - allocates an iova
* @iovad: - iova domain in question * @iovad: - iova domain in question

View File

@@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad); return iova >> iova_shift(iovad);
} }
-int iommu_iova_cache_init(void);
-void iommu_iova_cache_destroy(void);
+int iova_cache_get(void);
+void iova_cache_put(void);
struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);