iommu/amd: Add support for contiguous dma allocator
Add code to allocate memory from the contiguous memory allocator (CMA) so that coherent allocations larger than 8MB can be satisfied.

Tested-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
5fc872c732
commit
3b839a5799
|
@@ -33,6 +33,7 @@
|
||||||
#include <linux/export.h>
|
#include <linux/export.h>
|
||||||
#include <linux/irq.h>
|
#include <linux/irq.h>
|
||||||
#include <linux/msi.h>
|
#include <linux/msi.h>
|
||||||
|
#include <linux/dma-contiguous.h>
|
||||||
#include <asm/irq_remapping.h>
|
#include <asm/irq_remapping.h>
|
||||||
#include <asm/io_apic.h>
|
#include <asm/io_apic.h>
|
||||||
#include <asm/apic.h>
|
#include <asm/apic.h>
|
||||||
|
@@ -2913,37 +2914,42 @@ static void *alloc_coherent(struct device *dev, size_t size,
|
||||||
dma_addr_t *dma_addr, gfp_t flag,
|
dma_addr_t *dma_addr, gfp_t flag,
|
||||||
struct dma_attrs *attrs)
|
struct dma_attrs *attrs)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
|
||||||
void *virt_addr;
|
|
||||||
struct protection_domain *domain;
|
|
||||||
phys_addr_t paddr;
|
|
||||||
u64 dma_mask = dev->coherent_dma_mask;
|
u64 dma_mask = dev->coherent_dma_mask;
|
||||||
|
struct protection_domain *domain;
|
||||||
|
unsigned long flags;
|
||||||
|
struct page *page;
|
||||||
|
|
||||||
INC_STATS_COUNTER(cnt_alloc_coherent);
|
INC_STATS_COUNTER(cnt_alloc_coherent);
|
||||||
|
|
||||||
domain = get_domain(dev);
|
domain = get_domain(dev);
|
||||||
if (PTR_ERR(domain) == -EINVAL) {
|
if (PTR_ERR(domain) == -EINVAL) {
|
||||||
virt_addr = (void *)__get_free_pages(flag, get_order(size));
|
page = alloc_pages(flag, get_order(size));
|
||||||
*dma_addr = __pa(virt_addr);
|
*dma_addr = page_to_phys(page);
|
||||||
return virt_addr;
|
return page_address(page);
|
||||||
} else if (IS_ERR(domain))
|
} else if (IS_ERR(domain))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
size = PAGE_ALIGN(size);
|
||||||
dma_mask = dev->coherent_dma_mask;
|
dma_mask = dev->coherent_dma_mask;
|
||||||
flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
|
flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
|
||||||
|
|
||||||
virt_addr = (void *)__get_free_pages(flag, get_order(size));
|
page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
|
||||||
if (!virt_addr)
|
if (!page) {
|
||||||
|
if (!(flag & __GFP_WAIT))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
paddr = virt_to_phys(virt_addr);
|
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
|
||||||
|
get_order(size));
|
||||||
|
if (!page)
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
if (!dma_mask)
|
if (!dma_mask)
|
||||||
dma_mask = *dev->dma_mask;
|
dma_mask = *dev->dma_mask;
|
||||||
|
|
||||||
spin_lock_irqsave(&domain->lock, flags);
|
spin_lock_irqsave(&domain->lock, flags);
|
||||||
|
|
||||||
*dma_addr = __map_single(dev, domain->priv, paddr,
|
*dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
|
||||||
size, DMA_BIDIRECTIONAL, true, dma_mask);
|
size, DMA_BIDIRECTIONAL, true, dma_mask);
|
||||||
|
|
||||||
if (*dma_addr == DMA_ERROR_CODE) {
|
if (*dma_addr == DMA_ERROR_CODE) {
|
||||||
|
@@ -2955,11 +2961,12 @@ static void *alloc_coherent(struct device *dev, size_t size,
|
||||||
|
|
||||||
spin_unlock_irqrestore(&domain->lock, flags);
|
spin_unlock_irqrestore(&domain->lock, flags);
|
||||||
|
|
||||||
return virt_addr;
|
return page_address(page);
|
||||||
|
|
||||||
out_free:
|
out_free:
|
||||||
|
|
||||||
free_pages((unsigned long)virt_addr, get_order(size));
|
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
|
||||||
|
__free_pages(page, get_order(size));
|
||||||
|
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
@@ -2971,11 +2978,15 @@ static void free_coherent(struct device *dev, size_t size,
|
||||||
void *virt_addr, dma_addr_t dma_addr,
|
void *virt_addr, dma_addr_t dma_addr,
|
||||||
struct dma_attrs *attrs)
|
struct dma_attrs *attrs)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
|
||||||
struct protection_domain *domain;
|
struct protection_domain *domain;
|
||||||
|
unsigned long flags;
|
||||||
|
struct page *page;
|
||||||
|
|
||||||
INC_STATS_COUNTER(cnt_free_coherent);
|
INC_STATS_COUNTER(cnt_free_coherent);
|
||||||
|
|
||||||
|
page = virt_to_page(virt_addr);
|
||||||
|
size = PAGE_ALIGN(size);
|
||||||
|
|
||||||
domain = get_domain(dev);
|
domain = get_domain(dev);
|
||||||
if (IS_ERR(domain))
|
if (IS_ERR(domain))
|
||||||
goto free_mem;
|
goto free_mem;
|
||||||
|
@@ -2989,7 +3000,8 @@ static void free_coherent(struct device *dev, size_t size,
|
||||||
spin_unlock_irqrestore(&domain->lock, flags);
|
spin_unlock_irqrestore(&domain->lock, flags);
|
||||||
|
|
||||||
free_mem:
|
free_mem:
|
||||||
free_pages((unsigned long)virt_addr, get_order(size));
|
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
|
||||||
|
__free_pages(page, get_order(size));
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
Loading…
Reference in New Issue