memblock: replace __alloc_bootmem with memblock_alloc_from
The functions are equivalent; the latter just does not require the
nobootmem translation layer.

The conversion is done using the following semantic patch:

@@
expression size, align, goal;
@@
- __alloc_bootmem(size, align, goal)
+ memblock_alloc_from(size, align, goal)

Link: http://lkml.kernel.org/r/1536927045-23536-21-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Serge Semin <fancer.lancer@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
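Editor's note: the semantic patch above is a standard Coccinelle script, so the
conversion can be replayed locally. A minimal sketch, assuming the script is
saved as alloc_bootmem_from.cocci (an illustrative file name, not part of this
series; the script body is taken verbatim from the message above):

	// alloc_bootmem_from.cocci -- illustrative name, not from the commit.
	// The rule rewrites every three-argument __alloc_bootmem() call.
	@@
	expression size, align, goal;
	@@
	- __alloc_bootmem(size, align, goal)
	+ memblock_alloc_from(size, align, goal)

It would then be applied tree-wide with something like
spatch --sp-file alloc_bootmem_from.cocci --in-place --dir arch/
(--sp-file, --in-place and --dir are standard spatch options; the target
directory here is only an example).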
parent 15c3c114ed
commit 4fc4a09e4c
@@ -331,7 +331,7 @@ cia_prepare_tbia_workaround(int window)
 	long i;
 
 	/* Use minimal 1K map. */
-	ppte = __alloc_bootmem(CIA_BROKEN_TBIA_SIZE, 32768, 0);
+	ppte = memblock_alloc_from(CIA_BROKEN_TBIA_SIZE, 32768, 0);
 	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
 
 	for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
@@ -87,13 +87,13 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
 		printk("%s: couldn't allocate arena ptes from node %d\n"
 		       "    falling back to system-wide allocation\n",
 		       __func__, nid);
-		arena->ptes = __alloc_bootmem(mem_size, align, 0);
+		arena->ptes = memblock_alloc_from(mem_size, align, 0);
 	}
 
 #else /* CONFIG_DISCONTIGMEM */
 
 	arena = alloc_bootmem(sizeof(*arena));
-	arena->ptes = __alloc_bootmem(mem_size, align, 0);
+	arena->ptes = memblock_alloc_from(mem_size, align, 0);
 
 #endif /* CONFIG_DISCONTIGMEM */
@@ -294,7 +294,7 @@ move_initrd(unsigned long mem_limit)
 	unsigned long size;
 
 	size = initrd_end - initrd_start;
-	start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
+	start = memblock_alloc_from(PAGE_ALIGN(size), PAGE_SIZE, 0);
 	if (!start || __pa(start) + size > mem_limit) {
 		initrd_start = initrd_end = 0;
 		return NULL;
@@ -1835,7 +1835,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 /* Caller prevents this from being called after init */
 static void * __ref mca_bootmem(void)
 {
-	return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
+	return memblock_alloc_from(sizeof(struct ia64_mca_cpu),
 			KERNEL_STACK_SIZE, 0);
 }
 
@@ -85,8 +85,9 @@ skip:
 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
-				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	cpu_data = memblock_alloc_from(PERCPU_PAGE_SIZE * num_possible_cpus(),
+				       PERCPU_PAGE_SIZE,
+				       __pa(MAX_DMA_ADDRESS));
 }
 
 /**
@@ -2263,7 +2263,7 @@ void __init trap_init(void)
 
 		memblock_set_bottom_up(true);
 		ebase = (unsigned long)
-			__alloc_bootmem(size, 1 << fls(size), 0);
+			memblock_alloc_from(size, 1 << fls(size), 0);
 		memblock_set_bottom_up(false);
 
 		/*
@@ -32,7 +32,7 @@ void * __init prom_early_alloc(unsigned long size)
 {
 	void *ret;
 
-	ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+	ret = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
 	if (ret != NULL)
 		memset(ret, 0, size);
 
@@ -1588,7 +1588,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
 	void *ptr;
 
 	if (!node_online(node) || !NODE_DATA(node)) {
-		ptr = __alloc_bootmem(size, align, goal);
+		ptr = memblock_alloc_from(size, align, goal);
 		pr_info("cpu %d has no node %d or node-local memory\n",
 			cpu, node);
 		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
@@ -1601,7 +1601,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
 	}
 	return ptr;
 #else
-	return __alloc_bootmem(size, align, goal);
+	return memblock_alloc_from(size, align, goal);
 #endif
 }
 
@@ -1627,7 +1627,7 @@ static void __init pcpu_populate_pte(unsigned long addr)
 	if (pgd_none(*pgd)) {
 		pud_t *new;
 
-		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
 		pgd_populate(&init_mm, pgd, new);
 	}
 
@@ -1635,7 +1635,7 @@ static void __init pcpu_populate_pte(unsigned long addr)
 	if (pud_none(*pud)) {
 		pmd_t *new;
 
-		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
 		pud_populate(&init_mm, pud, new);
 	}
 
@@ -1643,7 +1643,7 @@ static void __init pcpu_populate_pte(unsigned long addr)
 	if (!pmd_present(*pmd)) {
 		pte_t *new;
 
-		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
 		pmd_populate_kernel(&init_mm, pmd, new);
 	}
 }
@@ -265,7 +265,7 @@ void __init mem_init(void)
 	i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
 	i += 1;
 	sparc_valid_addr_bitmap = (unsigned long *)
-		__alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL);
+		memblock_alloc_from(i << 2, SMP_CACHE_BYTES, 0UL);
 
 	if (sparc_valid_addr_bitmap == NULL) {
 		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
@@ -1811,7 +1811,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 		if (pgd_none(*pgd)) {
 			pud_t *new;
 
-			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+						  PAGE_SIZE);
 			alloc_bytes += PAGE_SIZE;
 			pgd_populate(&init_mm, pgd, new);
 		}
@@ -1823,7 +1824,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 			vstart = kernel_map_hugepud(vstart, vend, pud);
 			continue;
 		}
-		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+					  PAGE_SIZE);
 		alloc_bytes += PAGE_SIZE;
 		pud_populate(&init_mm, pud, new);
 	}
@@ -1836,7 +1838,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 			vstart = kernel_map_hugepmd(vstart, vend, pmd);
 			continue;
 		}
-		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+					  PAGE_SIZE);
 		alloc_bytes += PAGE_SIZE;
 		pmd_populate_kernel(&init_mm, pmd, new);
 	}
@@ -303,12 +303,12 @@ static void __init srmmu_nocache_init(void)
 
 	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
 
-	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
-		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
+	srmmu_nocache_pool = memblock_alloc_from(srmmu_nocache_size,
+						 SRMMU_NOCACHE_ALIGN_MAX, 0UL);
 	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
 
 	srmmu_nocache_bitmap =
-		__alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
-				SMP_CACHE_BYTES, 0UL);
+		memblock_alloc_from(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+				    SMP_CACHE_BYTES, 0UL);
 	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
 
@@ -467,7 +467,7 @@ static void __init sparc_context_init(int numctx)
 	unsigned long size;
 
 	size = numctx * sizeof(struct ctx_list);
-	ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+	ctx_list_pool = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
 
 	for (ctx = 0; ctx < numctx; ctx++) {
 		struct ctx_list *clist;
@@ -122,6 +122,14 @@ static inline void * __init memblock_alloc_raw(
 				      NUMA_NO_NODE);
 }
 
+static inline void * __init memblock_alloc_from(
+		phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
+{
+	return memblock_alloc_try_nid(size, align, min_addr,
+				      BOOTMEM_ALLOC_ACCESSIBLE,
+				      NUMA_NO_NODE);
+}
+
 static inline void * __init memblock_alloc_nopanic(
 		phys_addr_t size, phys_addr_t align)
 {
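Editor's note: the new helper preserves exactly the semantics the converted
call sites relied on: size bytes, align-byte alignment, and a lower bound of
min_addr (the old goal argument), with BOOTMEM_ALLOC_ACCESSIBLE requesting
memory the kernel can address directly. A minimal sketch of a converted
boot-time call site, mirroring the sparc page-table hunks above (the function
name is illustrative, not from the patch):

	/* Illustrative __init-time helper: allocate one page, page-aligned,
	 * at or above physical address PAGE_SIZE -- i.e. skipping physical
	 * page zero -- which is the same (size, align, goal) triple the
	 * sparc pcpu_populate_pte()/kernel_map_range() hunks pass.
	 */
	static void * __init example_alloc_ptpage(void)
	{
		return memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
	}

As with the old __alloc_bootmem(), a goal/min_addr of 0 simply means "no lower
bound", which is why most call sites in this patch pass 0 or 0UL through
unchanged.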