memblock: split memblock_find_base() out of __memblock_alloc_base()
This will be used by the array resize code and might prove useful to
some arch code as well, at which point it can be made non-static.

Also add a comment as to why aligning the size is important.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---
v2. Fix loss of size alignment
v3. Fix result code
commit 7f219c736f
parent 7590abe891
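Background for the comment this patch adds: rounding the requested size up to the alignment keeps back-to-back reservations contiguous, so neighbouring entries can be coalesced in the fixed-size reserved array instead of each odd-sized allocation leaving an unmergeable hole behind it. A rough userspace sketch of that arithmetic (the align helpers, sizes and addresses below are illustrative stand-ins, not the kernel's memblock_align_up()/memblock_find_region()):

#include <stdio.h>

typedef unsigned long long phys_addr_t;

/* stand-ins for the kernel's align-up/align-down helpers */
static phys_addr_t align_up(phys_addr_t x, phys_addr_t a)
{
	return (x + a - 1) & ~(a - 1);
}

static phys_addr_t align_down(phys_addr_t x, phys_addr_t a)
{
	return x & ~(a - 1);
}

int main(void)
{
	phys_addr_t align = 16, top = 0x1000;
	phys_addr_t sizes[] = { 5, 9, 3 };

	for (int i = 0; i < 3; i++) {
		/* base is aligned down in both cases; only the size differs */
		phys_addr_t unpadded = align_down(top - sizes[i], align);
		phys_addr_t padded_size = align_up(sizes[i], align);
		phys_addr_t padded = align_down(top - padded_size, align);

		/* the hole keeps the new region from merging with the one above it */
		printf("alloc %llu: hole without size align = %llu, with = %llu\n",
		       sizes[i],
		       top - (unpadded + sizes[i]),
		       top - (padded + padded_size));
		top = padded;
	}
	return 0;
}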
@@ -345,12 +345,15 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
 
 	BUG_ON(0 == size);
 
+	/* We align the size to limit fragmentation. Without this, a lot of
+	 * small allocs quickly eat up the whole reserve array on sparc
+	 */
+	size = memblock_align_up(size, align);
+
 	/* We do a bottom-up search for a region with the right
 	 * nid since that's easier considering how memblock_nid_range()
 	 * works
 	 */
-	size = memblock_align_up(size, align);
-
 	for (i = 0; i < mem->cnt; i++) {
 		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
 					       size, align, nid);
@@ -366,20 +369,7 @@ phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
-phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	phys_addr_t alloc;
-
-	alloc = __memblock_alloc_base(size, align, max_addr);
-
-	if (alloc == 0)
-		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
-		      (unsigned long long) size, (unsigned long long) max_addr);
-
-	return alloc;
-}
-
-phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
 	long i;
 	phys_addr_t base = 0;
@@ -387,8 +377,6 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 
 	BUG_ON(0 == size);
 
-	size = memblock_align_up(size, align);
-
 	/* Pump up max_addr */
 	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
 		max_addr = memblock.current_limit;
@@ -405,13 +393,43 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 			continue;
 		base = min(memblockbase + memblocksize, max_addr);
 		res_base = memblock_find_region(memblockbase, base, size, align);
-		if (res_base != MEMBLOCK_ERROR &&
-		    memblock_add_region(&memblock.reserved, res_base, size) >= 0)
+		if (res_base != MEMBLOCK_ERROR)
 			return res_base;
 	}
-	return 0;
+	return MEMBLOCK_ERROR;
 }
 
+phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+	phys_addr_t found;
+
+	/* We align the size to limit fragmentation. Without this, a lot of
+	 * small allocs quickly eat up the whole reserve array on sparc
+	 */
+	size = memblock_align_up(size, align);
+
+	found = memblock_find_base(size, align, max_addr);
+	if (found != MEMBLOCK_ERROR &&
+	    memblock_add_region(&memblock.reserved, found, size) >= 0)
+		return found;
+
+	return 0;
+}
+
+phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+	phys_addr_t alloc;
+
+	alloc = __memblock_alloc_base(size, align, max_addr);
+
+	if (alloc == 0)
+		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+		      (unsigned long long) size, (unsigned long long) max_addr);
+
+	return alloc;
+}
+
+
 /* You must call memblock_analyze() before this. */
 phys_addr_t __init memblock_phys_mem_size(void)
 {
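With the split, the allocator path is memblock_alloc() -> memblock_alloc_base() -> __memblock_alloc_base() -> memblock_find_base(); only __memblock_alloc_base() commits the range it found to memblock.reserved, so memblock_find_base() can later be reused by the array resize code without reserving anything. A hypothetical caller, only to illustrate how the two exported variants differ on failure (the function, the sizes and the 256MB limit are invented for the example):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>

static void __init example_reserve(void)	/* hypothetical arch setup hook */
{
	phys_addr_t ptable, crash_base;

	/* must succeed: memblock_alloc_base() panics with size/limit if it cannot */
	ptable = memblock_alloc_base(64 * 1024, 64 * 1024,
				     MEMBLOCK_ALLOC_ACCESSIBLE);

	/* optional: probe below 256MB; __memblock_alloc_base() returns 0 on failure */
	crash_base = __memblock_alloc_base(16UL << 20, 1UL << 20, 256UL << 20);
	if (!crash_base)
		printk(KERN_WARNING "example: no room below 256MB\n");

	/* ... map ptable and hand crash_base to the arch code here ... */
}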