dma-pool: make sure atomic pool suits device
When allocating DMA memory from a pool, the core can only guess which
atomic pool will fit a device's constraints. If it doesn't, get a safer
atomic pool and try again.
Fixes: c84dc6e68a ("dma-pool: add additional coherent pools to map to gfp mask")
Reported-by: Jeremy Linton <jeremy.linton@arm.com>
Suggested-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
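
For context on the retry described above: when the pool guessed from the device's GFP
constraints hands back memory that fails dma_coherent_ok(), the allocator asks for a
progressively more restrictive pool and tries again. Below is a minimal sketch of that
fallback order, assuming the three atomic pools introduced by c84dc6e68a
(atomic_pool_kernel, atomic_pool_dma32 and atomic_pool_dma); the helper name is
illustrative and not necessarily the one used by the patch:

/*
 * Illustrative sketch only: step from the widest pool to progressively
 * more restrictive ones when the previous pool did not satisfy the
 * device's DMA constraints.  Pool variables are those added by
 * c84dc6e68a; the helper name is an assumption made for this sketch.
 */
static struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
{
        if (bad_pool == atomic_pool_kernel)
                return atomic_pool_dma32 ? : atomic_pool_dma;

        if (bad_pool == atomic_pool_dma32)
                return atomic_pool_dma;

        /* Already at the most restrictive pool: nothing safer to try. */
        return NULL;
}

With a fallback like this, dma_guess_pool(dev, pool) can hand out the next candidate
each time the caller passes in the pool that just failed, which is how the loops in the
diff below use it.
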
commit 81e9d894e0
parent 48b6703858

@@ -240,39 +240,56 @@ static inline struct gen_pool *dma_guess_pool(struct device *dev,
 void *dma_alloc_from_pool(struct device *dev, size_t size,
                           struct page **ret_page, gfp_t flags)
 {
-        struct gen_pool *pool = dma_guess_pool(dev, NULL);
-        unsigned long val;
+        struct gen_pool *pool = NULL;
+        unsigned long val = 0;
         void *ptr = NULL;
+        phys_addr_t phys;
 
-        if (!pool) {
-                WARN(1, "%pGg atomic pool not initialised!\n", &flags);
-                return NULL;
+        while (1) {
+                pool = dma_guess_pool(dev, pool);
+                if (!pool) {
+                        WARN(1, "Failed to get suitable pool for %s\n",
+                             dev_name(dev));
+                        break;
+                }
+
+                val = gen_pool_alloc(pool, size);
+                if (!val)
+                        continue;
+
+                phys = gen_pool_virt_to_phys(pool, val);
+                if (dma_coherent_ok(dev, phys, size))
+                        break;
+
+                gen_pool_free(pool, val, size);
+                val = 0;
         }
 
-        val = gen_pool_alloc(pool, size);
-        if (likely(val)) {
-                phys_addr_t phys = gen_pool_virt_to_phys(pool, val);
 
+        if (val) {
                 *ret_page = pfn_to_page(__phys_to_pfn(phys));
                 ptr = (void *)val;
                 memset(ptr, 0, size);
-        } else {
-                WARN_ONCE(1, "DMA coherent pool depleted, increase size "
-                             "(recommended min coherent_pool=%zuK)\n",
-                          gen_pool_size(pool) >> 9);
+
+                if (gen_pool_avail(pool) < atomic_pool_size)
+                        schedule_work(&atomic_pool_work);
         }
-        if (gen_pool_avail(pool) < atomic_pool_size)
-                schedule_work(&atomic_pool_work);
 
         return ptr;
 }
 
 bool dma_free_from_pool(struct device *dev, void *start, size_t size)
 {
-        struct gen_pool *pool = dma_guess_pool(dev, NULL);
+        struct gen_pool *pool = NULL;
 
-        if (!pool || !gen_pool_has_addr(pool, (unsigned long)start, size))
-                return false;
-        gen_pool_free(pool, (unsigned long)start, size);
-        return true;
+        while (1) {
+                pool = dma_guess_pool(dev, pool);
+                if (!pool)
+                        return false;
+
+                if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
+                        gen_pool_free(pool, (unsigned long)start, size);
+                        return true;
+                }
+        }
 }