iommu: Make IOVA domain low limit flexible

To share the IOVA allocator with other architectures, it needs to
accommodate more general aperture restrictions; move the lower limit
from a compile-time constant to a runtime domain property to allow
IOVA domains with different requirements to co-exist.

Also reword the slightly unclear description of alloc_iova since we're
touching it anyway.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
Robin Murphy 2015-01-12 17:51:15 +00:00 committed by Joerg Roedel
parent 85b4545629
commit 1b72250076
3 changed files with 15 additions and 11 deletions

View File

@@ -71,6 +71,9 @@
 					__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
 #define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
 
+/* IO virtual address start page frame number */
+#define IOVA_START_PFN		(1)
+
 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
 #define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
@@ -1632,7 +1635,7 @@ static int dmar_init_reserved_ranges(void)
 	struct iova *iova;
 	int i;
 
-	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
+	init_iova_domain(&reserved_iova_list, IOVA_START_PFN, DMA_32BIT_PFN);
 
 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
 		&reserved_rbtree_key);
@@ -1690,7 +1693,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	int adjust_width, agaw;
 	unsigned long sagaw;
 
-	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, IOVA_START_PFN, DMA_32BIT_PFN);
 	domain_reserve_special_ranges(domain);
 
 	/* calculate AGAW */
@@ -4313,7 +4316,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 {
 	int adjust_width;
 
-	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, IOVA_START_PFN, DMA_32BIT_PFN);
 	domain_reserve_special_ranges(domain);
 
 	/* calculate AGAW */

View File

@@ -55,11 +55,13 @@ void free_iova_mem(struct iova *iova)
 }
 
 void
-init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
+init_iova_domain(struct iova_domain *iovad, unsigned long start_pfn,
+	unsigned long pfn_32bit)
 {
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
 	iovad->cached32_node = NULL;
+	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = pfn_32bit;
 }
@@ -162,7 +164,7 @@ move_left:
 	if (!curr) {
 		if (size_aligned)
 			pad_size = iova_get_pad_size(size, limit_pfn);
-		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
 			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 			return -ENOMEM;
 		}
@@ -237,8 +239,8 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
  * @size: - size of page frames to allocate
  * @limit_pfn: - max limit address
  * @size_aligned: - set if size_aligned address range is required
- * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
- * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
+ * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
+ * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
  * flag is set then the allocated address iova->pfn_lo will be naturally
  * aligned on roundup_power_of_two(size).
  */

View File

@@ -16,9 +16,6 @@
 #include <linux/rbtree.h>
 #include <linux/dma-mapping.h>
 
-/* IO virtual address start page frame number */
-#define IOVA_START_PFN		(1)
-
 /* iova structure */
 struct iova {
 	struct rb_node	node;
@@ -31,6 +28,7 @@ struct iova_domain {
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
 	struct rb_node	*cached32_node; /* Save last alloced node */
+	unsigned long	start_pfn;	/* Lower limit for this domain */
 	unsigned long	dma_32bit_pfn;
 };
@@ -52,7 +50,8 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 	unsigned long pfn_hi);
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
-void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
+void init_iova_domain(struct iova_domain *iovad, unsigned long start_pfn,
+	unsigned long pfn_32bit);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
 struct iova *split_and_remove_iova(struct iova_domain *iovad,