arm: dma-mapping: Add support to extend DMA IOMMU mappings
Instead of using just one bitmap to keep track of IO virtual addresses (handed out for IOMMU use), introduce an array of bitmaps. This allows us to extend existing mappings when we run out of iova space in the initial mapping.

If there is not enough space in the mapping to service an IO virtual address allocation request, __alloc_iova() tries to extend the mapping -- by allocating another bitmap -- and makes another allocation attempt using the freshly allocated bitmap. This allows ARM IOMMU drivers to start with a decent initial size when a dma_iommu_mapping is created while still avoiding running out of IO virtual addresses for the mapping.

Signed-off-by: Andreas Herrmann <andreas.herrmann@calxeda.com>
[mszyprow: removed extensions parameter to arm_iommu_create_mapping() function, which will be modified in the next patch anyway, and some debug messages about extending the bitmap]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
parent cfbf8d4857
commit 4d852ef8c2
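For context, here is a minimal driver-side sketch of how such a mapping is typically created and attached. The device, bus, base address, size and order values are made-up examples and are not taken from this patch; with this change, the iova space backing the mapping can later grow on demand via extend_iommu_mapping() instead of being fixed at creation time.

/* Hypothetical example values -- not part of this patch. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static int example_setup_iommu_mapping(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* 128 MiB of iova space starting at 0x80000000, order-0 pages */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, SZ_128M, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* route the device's DMA API calls through the IOMMU mapping */
	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}

	return 0;
}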
arch/arm/include/asm/dma-iommu.h
@@ -13,8 +13,12 @@ struct dma_iommu_mapping {
 	/* iommu specific data */
 	struct iommu_domain	*domain;
 
-	void			*bitmap;
-	size_t			bits;
+	unsigned long		**bitmaps;	/* array of bitmaps */
+	unsigned int		nr_bitmaps;	/* nr of elements in array */
+	unsigned int		extensions;
+	size_t			bitmap_size;	/* size of a single bitmap */
+	size_t			bits;		/* per bitmap */
+	unsigned int		size;		/* per bitmap */
 	unsigned int		order;
 	dma_addr_t		base;
 

arch/arm/mm/dma-mapping.c
@@ -1069,6 +1069,8 @@ fs_initcall(dma_debug_do_init);
 
 /* IOMMU */
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
+
 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 				      size_t size)
 {
@@ -1076,6 +1078,8 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 	unsigned int align = 0;
 	unsigned int count, start;
 	unsigned long flags;
+	dma_addr_t iova;
+	int i;
 
 	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
 		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
@@ -1087,30 +1091,78 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 		align = (1 << (order - mapping->order)) - 1;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
-					   count, align);
-	if (start > mapping->bits) {
-		spin_unlock_irqrestore(&mapping->lock, flags);
-		return DMA_ERROR_CODE;
+	for (i = 0; i < mapping->nr_bitmaps; i++) {
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits)
+			continue;
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+		break;
 	}
 
-	bitmap_set(mapping->bitmap, start, count);
+	/*
+	 * No unused range found. Try to extend the existing mapping
+	 * and perform a second attempt to reserve an IO virtual
+	 * address range of size bytes.
+	 */
+	if (i == mapping->nr_bitmaps) {
+		if (extend_iommu_mapping(mapping)) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+	}
 	spin_unlock_irqrestore(&mapping->lock, flags);
 
-	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+	iova = mapping->base + (mapping->size * i);
+	iova += start << (mapping->order + PAGE_SHIFT);
+
+	return iova;
 }
 
 static inline void __free_iova(struct dma_iommu_mapping *mapping,
 			       dma_addr_t addr, size_t size)
 {
-	unsigned int start = (addr - mapping->base) >>
-			     (mapping->order + PAGE_SHIFT);
-	unsigned int count = ((size >> PAGE_SHIFT) +
-			      (1 << mapping->order) - 1) >> mapping->order;
+	unsigned int start, count;
 	unsigned long flags;
+	dma_addr_t bitmap_base;
+	u32 bitmap_index;
+
+	if (!size)
+		return;
+
+	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping->size;
+	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
+
+	bitmap_base = mapping->base + mapping->size * bitmap_index;
+
+	start = (addr - bitmap_base) >> (mapping->order + PAGE_SHIFT);
+
+	if (addr + size > bitmap_base + mapping->size) {
+		/*
+		 * The address range to be freed reaches into the iova
+		 * range of the next bitmap. This should not happen as
+		 * we don't allow this in __alloc_iova (at the
+		 * moment).
		 */
+		BUG();
+	} else
+		count = ((size >> PAGE_SHIFT) +
+			(1 << mapping->order) - 1) >> mapping->order;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	bitmap_clear(mapping->bitmap, start, count);
+	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
@@ -1890,8 +1942,8 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
 			 int order)
 {
 	unsigned int count = size >> (PAGE_SHIFT + order);
-	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
 	struct dma_iommu_mapping *mapping;
+	int extensions = 0;
 	int err = -ENOMEM;
 
 	if (!count)
@@ -1901,23 +1953,35 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
 	if (!mapping)
 		goto err;
 
-	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!mapping->bitmap)
+	mapping->bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	mapping->bitmaps = kzalloc((extensions + 1) * sizeof(unsigned long *),
+				GFP_KERNEL);
+	if (!mapping->bitmaps)
 		goto err2;
 
+	mapping->bitmaps[0] = kzalloc(mapping->bitmap_size, GFP_KERNEL);
+	if (!mapping->bitmaps[0])
+		goto err3;
+
+	mapping->nr_bitmaps = 1;
+	mapping->extensions = extensions;
 	mapping->base = base;
-	mapping->bits = BITS_PER_BYTE * bitmap_size;
+	mapping->size = size;
 	mapping->order = order;
+	mapping->bits = BITS_PER_BYTE * mapping->bitmap_size;
+
 	spin_lock_init(&mapping->lock);
 
 	mapping->domain = iommu_domain_alloc(bus);
 	if (!mapping->domain)
-		goto err3;
+		goto err4;
 
 	kref_init(&mapping->kref);
 	return mapping;
+err4:
+	kfree(mapping->bitmaps[0]);
 err3:
-	kfree(mapping->bitmap);
+	kfree(mapping->bitmaps);
 err2:
 	kfree(mapping);
 err:
@@ -1927,14 +1991,35 @@ EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
 
 static void release_iommu_mapping(struct kref *kref)
 {
+	int i;
 	struct dma_iommu_mapping *mapping =
 		container_of(kref, struct dma_iommu_mapping, kref);
 
 	iommu_domain_free(mapping->domain);
-	kfree(mapping->bitmap);
+	for (i = 0; i < mapping->nr_bitmaps; i++)
+		kfree(mapping->bitmaps[i]);
+	kfree(mapping->bitmaps);
 	kfree(mapping);
 }
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
+{
+	int next_bitmap;
+
+	if (mapping->nr_bitmaps > mapping->extensions)
+		return -EINVAL;
+
+	next_bitmap = mapping->nr_bitmaps;
+	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
+						GFP_ATOMIC);
+	if (!mapping->bitmaps[next_bitmap])
+		return -ENOMEM;
+
+	mapping->nr_bitmaps++;
+
+	return 0;
+}
+
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 {
 	if (mapping)
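To make the per-bitmap arithmetic in __free_iova() above concrete, a small worked example follows; the base, size and address values are assumed purely for illustration (order = 0, so one bitmap bit covers one page), and example_decode_iova() is not part of the patch.

#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/types.h>

/* Assumed example values -- not taken from the patch. */
static void example_decode_iova(void)
{
	dma_addr_t base = 0x80000000;			/* mapping->base */
	dma_addr_t size = SZ_128M;			/* mapping->size, iova span per bitmap */
	dma_addr_t addr = base + SZ_128M + 0x1000;	/* iova inside the second bitmap */

	u32 bitmap_index = (u32)(addr - base) / (u32)size;		/* = 1 */
	dma_addr_t bitmap_base = base + size * bitmap_index;		/* = 0x88000000 */
	unsigned int start = (addr - bitmap_base) >> PAGE_SHIFT;	/* = bit 1 */

	/* __free_iova() then clears 'count' bits of bitmaps[1] starting at 'start' */
	(void)start;
}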