Merge branch 'fixes-for-v3.18' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull CMA and DMA-mapping fixes from Marek Szyprowski:
 "This contains important fixes for recently introduced highmem support
  for default contiguous memory region used for dma-mapping subsystem"

* 'fixes-for-v3.18' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  mm, cma: make parameters order consistent in func declaration and definition
  mm: cma: Use %pa to print physical addresses
  mm: cma: Ensure that reservations never cross the low/high mem boundary
  mm: cma: Always consider a 0 base address reservation as dynamic
  mm: cma: Don't crash on allocation if CMA area can't be activated
commit f3ed88a6bc
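For context, the "default contiguous memory region" named above is what backs large coherent DMA allocations on CMA-enabled systems; drivers never touch CMA directly and simply go through the DMA API. A minimal, hypothetical driver-side sketch (the device pointer, buffer size and function name are illustrative, not part of this merge):

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/sizes.h>

    /* Hypothetical helper: on a CMA-enabled system a large coherent buffer
     * like this one is typically carved out of the default CMA region. */
    static void *example_alloc_dma_buffer(struct device *dev, dma_addr_t *dma)
    {
            return dma_alloc_coherent(dev, SZ_4M, dma, GFP_KERNEL);
    }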
include/linux/cma.h

@@ -18,12 +18,12 @@ struct cma;
 extern phys_addr_t cma_get_base(struct cma *cma);
 extern unsigned long cma_get_size(struct cma *cma);
 
-extern int __init cma_declare_contiguous(phys_addr_t size,
-                       phys_addr_t base, phys_addr_t limit,
+extern int __init cma_declare_contiguous(phys_addr_t base,
+                       phys_addr_t size, phys_addr_t limit,
                        phys_addr_t alignment, unsigned int order_per_bit,
                        bool fixed, struct cma **res_cma);
-extern int cma_init_reserved_mem(phys_addr_t size,
-                               phys_addr_t base, int order_per_bit,
+extern int cma_init_reserved_mem(phys_addr_t base,
+                               phys_addr_t size, int order_per_bit,
                                struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
 extern bool cma_release(struct cma *cma, struct page *pages, int count);
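For reference, a minimal, hypothetical caller of the reordered declaration above, with base first and size second; the 64 MiB size, the zero base and the names are illustrative only. Passing base = 0 requests a dynamically placed region, which the "Always consider a 0 base address reservation as dynamic" fix below now treats as non-fixed regardless of the fixed argument. The call has to happen during early boot while memblock is still live, e.g. from arch-level reservation code:

    #include <linux/cma.h>
    #include <linux/sizes.h>

    static struct cma *example_cma;     /* hypothetical region handle */

    static int __init example_cma_reserve(void)
    {
            /* base = 0 (dynamic placement), size = 64 MiB, no limit,
             * default alignment, order_per_bit = 0, fixed = false */
            return cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
                                          &example_cma);
    }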
mm/cma.c | 70
@@ -124,6 +124,7 @@ static int __init cma_activate_area(struct cma *cma)
 
 err:
        kfree(cma->bitmap);
+       cma->count = 0;
        return -EINVAL;
 }
 
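The added cma->count = 0 is what turns a failed activation into a graceful failure: the bitmap has just been freed, so the area has to look empty to later cma_alloc() calls instead of leaving them to dereference freed state. A rough sketch of the early-return guard this relies on; the real cma_alloc() body is not part of this diff:

    struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
    {
            struct page *page = NULL;

            /* An area whose activation failed has count == 0 (and no
             * bitmap), so bail out instead of touching freed state. */
            if (!cma || !cma->count)
                    return NULL;

            /* ... normal bitmap search and page allocation ... */
            return page;
    }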
@@ -217,9 +218,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
        phys_addr_t highmem_start = __pa(high_memory);
        int ret = 0;
 
-       pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
-               __func__, (unsigned long)size, (unsigned long)base,
-               (unsigned long)limit, (unsigned long)alignment);
+       pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
+               __func__, &size, &base, &limit, &alignment);
 
        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
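The pr_debug() change above is the %pa conversion from the commit list: %pa prints a phys_addr_t at its native width (32 or 64 bit, e.g. with LPAE) and takes a pointer to the variable instead of a lossy cast to unsigned long. A minimal sketch of the pattern, with made-up values:

    #include <linux/printk.h>
    #include <linux/types.h>

    static void example_print_region(void)
    {
            phys_addr_t base = 0x20000000;  /* illustrative values only */
            phys_addr_t size = 0x04000000;

            /* %pa expects the *address* of the phys_addr_t, not a cast */
            pr_info("CMA example: base %pa, size %pa\n", &base, &size);
    }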
@@ -244,52 +244,72 @@ int __init cma_declare_contiguous(phys_addr_t base,
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);
 
+       if (!base)
+               fixed = false;
+
        /* size should be aligned with order_per_bit */
        if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;
 
        /*
-        * adjust limit to avoid crossing low/high memory boundary for
-        * automatically allocated regions
+        * If allocating at a fixed base the request region must not cross the
+        * low/high memory boundary.
         */
-       if (((limit == 0 || limit > memblock_end) &&
-            (memblock_end - size < highmem_start &&
-             memblock_end > highmem_start)) ||
-           (!fixed && limit > highmem_start && limit - size < highmem_start)) {
-               limit = highmem_start;
-       }
-
-       if (fixed && base < highmem_start && base+size > highmem_start) {
+       if (fixed && base < highmem_start && base + size > highmem_start) {
                ret = -EINVAL;
-               pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n",
-                       (unsigned long)base, (unsigned long)highmem_start);
+               pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
+                       &base, &highmem_start);
                goto err;
        }
 
+       /*
+        * If the limit is unspecified or above the memblock end, its effective
+        * value will be the memblock end. Set it explicitly to simplify further
+        * checks.
+        */
+       if (limit == 0 || limit > memblock_end)
+               limit = memblock_end;
+
        /* Reserve memory */
-       if (base && fixed) {
+       if (fixed) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        ret = -EBUSY;
                        goto err;
                }
        } else {
-               phys_addr_t addr = memblock_alloc_range(size, alignment, base,
-                                                       limit);
-               if (!addr) {
-                       ret = -ENOMEM;
-                       goto err;
-               } else {
-                       base = addr;
+               phys_addr_t addr = 0;
+
+               /*
+                * All pages in the reserved area must come from the same zone.
+                * If the requested region crosses the low/high memory boundary,
+                * try allocating from high memory first and fall back to low
+                * memory in case of failure.
+                */
+               if (base < highmem_start && limit > highmem_start) {
+                       addr = memblock_alloc_range(size, alignment,
+                                                   highmem_start, limit);
+                       limit = highmem_start;
                }
+
+               if (!addr) {
+                       addr = memblock_alloc_range(size, alignment, base,
+                                                   limit);
+                       if (!addr) {
+                               ret = -ENOMEM;
+                               goto err;
+                       }
+               }
+
+               base = addr;
        }
 
        ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
        if (ret)
                goto err;
 
-       pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
-               (unsigned long)base);
+       pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
+               &base);
        return 0;
 
 err:
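To see what the reworked dynamic path above does, take a made-up 32-bit layout with highmem_start = 0x30000000 and memblock_end = 0x40000000, and a dynamic request whose [base, limit) window crosses that boundary: the first memblock_alloc_range() attempt is confined to [highmem_start, limit), and only if it fails is limit clamped to highmem_start and the allocation retried in low memory, so the reservation always ends up entirely on one side of the boundary. A standalone, illustrative sketch of that decision order (plain C with a fake allocator, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t phys_addr_t;

    #define HIGHMEM_START 0x30000000ULL    /* made-up boundary */

    /* Stand-in for memblock_alloc_range(): pretend high memory is exhausted
     * and allocate top-down in low memory; returns 0 on failure. */
    static phys_addr_t fake_alloc_range(phys_addr_t size, phys_addr_t start,
                                        phys_addr_t end)
    {
            if (start >= HIGHMEM_START)
                    return 0;
            return end - size;
    }

    int main(void)
    {
            phys_addr_t base = 0, limit = 0x40000000ULL, size = 0x04000000ULL;
            phys_addr_t addr = 0;

            if (base < HIGHMEM_START && limit > HIGHMEM_START) {
                    /* first attempt: entirely in high memory */
                    addr = fake_alloc_range(size, HIGHMEM_START, limit);
                    limit = HIGHMEM_START;  /* fallback window: low memory only */
            }
            if (!addr)      /* second attempt: entirely below the boundary */
                    addr = fake_alloc_range(size, base, limit);

            printf("reserved at %#llx, limit now %#llx\n",
                   (unsigned long long)addr, (unsigned long long)limit);
            return 0;
    }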