iommu: Allocate dma-remapping structures using numa locality info
Allocate dma-remapping structures using numa locality info. On platforms where the remapping hardware units span different NUMA nodes, this lets the remapping hardware reach its DMA-remapping translation structures through node-local memory accesses.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit 4c923d4714
parent 824cd75bf3
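A minimal sketch of the allocation pattern the patch introduces (not part of the commit): page-table pages are requested via alloc_pages_node() with the IOMMU's or domain's NUMA node, and a negative node id means "no preference", in which case the allocator uses the current CPU's node. alloc_pages_node(), page_address(), free_page() and the GFP flags are real kernel APIs; the helper names below are hypothetical stand-ins for alloc_pgtable_page()/free_pgtable_page().

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper mirroring alloc_pgtable_page(): allocate one
 * zeroed page as close as possible to 'node'. node < 0 means "no
 * preferred node", so the current CPU's node is used instead. */
static void *alloc_node_local_table(int node)
{
	struct page *page;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

/* Hypothetical counterpart for releasing such a page. */
static void free_node_local_table(void *vaddr)
{
	if (vaddr)
		free_page((unsigned long)vaddr);
}

GFP_ATOMIC matches the existing allocator because these pages can be needed from DMA mapping paths with spinlocks held; the node argument only steers placement, so the allocation still falls back to a remote node rather than failing when node-local memory is tight.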
@@ -277,6 +277,7 @@ static int hw_pass_through = 1;
 
 struct dmar_domain {
 	int	id;			/* domain id */
+	int	nid;			/* node id */
 	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/
 
 	struct list_head devices; 	/* all devices' list */
@@ -400,15 +401,18 @@ static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
 }
 
 
-static inline void *alloc_pgtable_page(void)
+static inline void *alloc_pgtable_page(int node)
 {
 	unsigned int flags;
-	void *vaddr;
+	struct page *page;
+	void *vaddr = NULL;
 
 	/* trying to avoid low memory issues */
 	flags = current->flags & PF_MEMALLOC;
 	current->flags |= PF_MEMALLOC;
-	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
+	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
+	if (page)
+		vaddr = page_address(page);
 	current->flags &= (~PF_MEMALLOC | flags);
 	return vaddr;
 }
@@ -589,7 +593,8 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
 	root = &iommu->root_entry[bus];
 	context = get_context_addr_from_root(root);
 	if (!context) {
-		context = (struct context_entry *)alloc_pgtable_page();
+		context = (struct context_entry *)
+				alloc_pgtable_page(iommu->node);
 		if (!context) {
 			spin_unlock_irqrestore(&iommu->lock, flags);
 			return NULL;
@@ -732,7 +737,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 		if (!dma_pte_present(pte)) {
 			uint64_t pteval;
 
-			tmp_page = alloc_pgtable_page();
+			tmp_page = alloc_pgtable_page(domain->nid);
 
 			if (!tmp_page)
 				return NULL;
@@ -868,7 +873,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 	struct root_entry *root;
 	unsigned long flags;
 
-	root = (struct root_entry *)alloc_pgtable_page();
+	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
 	if (!root)
 		return -ENOMEM;
 
@@ -1263,6 +1268,7 @@ static struct dmar_domain *alloc_domain(void)
 	if (!domain)
 		return NULL;
 
+	domain->nid = -1;
 	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
 	domain->flags = 0;
 
@@ -1420,9 +1426,10 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 		domain->iommu_snooping = 0;
 
 	domain->iommu_count = 1;
+	domain->nid = iommu->node;
 
 	/* always allocate the top pgd */
-	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
 	if (!domain->pgd)
 		return -ENOMEM;
 	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
@@ -1577,6 +1584,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 	spin_lock_irqsave(&domain->iommu_lock, flags);
 	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
 		domain->iommu_count++;
+		if (domain->iommu_count == 1)
+			domain->nid = iommu->node;
 		domain_update_iommu_cap(domain);
 	}
 	spin_unlock_irqrestore(&domain->iommu_lock, flags);
@@ -3416,6 +3425,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
 		return NULL;
 
 	domain->id = vm_domid++;
+	domain->nid = -1;
 	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
 	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
 
@@ -3442,9 +3452,10 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 	domain->iommu_coherency = 0;
 	domain->iommu_snooping = 0;
 	domain->max_addr = 0;
+	domain->nid = -1;
 
 	/* always allocate the top pgd */
-	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
 	if (!domain->pgd)
 		return -ENOMEM;
 	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);