mm: remove nobootmem
Move a few remaining functions from nobootmem.c to memblock.c and remove
nobootmem

Link: http://lkml.kernel.org/r/1536927045-23536-28-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Serge Semin <fancer.lancer@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7c2ee349cf
commit bda49a8116
mm/Makefile
@@ -42,7 +42,6 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
 			   debug.o $(mmu-y)
 
 obj-y += init-mm.o
-obj-y += nobootmem.o
 obj-y += memblock.o
 
 ifdef CONFIG_MMU
mm/memblock.c (104 additions)
@@ -82,6 +82,16 @@
  * initialization completes.
  */
 
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+struct pglist_data __refdata contig_page_data;
+EXPORT_SYMBOL(contig_page_data);
+#endif
+
+unsigned long max_low_pfn;
+unsigned long min_low_pfn;
+unsigned long max_pfn;
+unsigned long long max_possible_pfn;
+
 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
@@ -1877,6 +1887,100 @@ static int __init early_memblock(char *p)
 }
 early_param("memblock", early_memblock);
 
+static void __init __free_pages_memory(unsigned long start, unsigned long end)
+{
+	int order;
+
+	while (start < end) {
+		order = min(MAX_ORDER - 1UL, __ffs(start));
+
+		while (start + (1UL << order) > end)
+			order--;
+
+		memblock_free_pages(pfn_to_page(start), start, order);
+
+		start += (1UL << order);
+	}
+}
+
+static unsigned long __init __free_memory_core(phys_addr_t start,
+				 phys_addr_t end)
+{
+	unsigned long start_pfn = PFN_UP(start);
+	unsigned long end_pfn = min_t(unsigned long,
+				      PFN_DOWN(end), max_low_pfn);
+
+	if (start_pfn >= end_pfn)
+		return 0;
+
+	__free_pages_memory(start_pfn, end_pfn);
+
+	return end_pfn - start_pfn;
+}
+
+static unsigned long __init free_low_memory_core_early(void)
+{
+	unsigned long count = 0;
+	phys_addr_t start, end;
+	u64 i;
+
+	memblock_clear_hotplug(0, -1);
+
+	for_each_reserved_mem_region(i, &start, &end)
+		reserve_bootmem_region(start, end);
+
+	/*
+	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
+	 * because in some case like Node0 doesn't have RAM installed
+	 * low ram will be on Node1
+	 */
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
+				NULL)
+		count += __free_memory_core(start, end);
+
+	return count;
+}
+
+static int reset_managed_pages_done __initdata;
+
+void reset_node_managed_pages(pg_data_t *pgdat)
+{
+	struct zone *z;
+
+	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+		z->managed_pages = 0;
+}
+
+void __init reset_all_zones_managed_pages(void)
+{
+	struct pglist_data *pgdat;
+
+	if (reset_managed_pages_done)
+		return;
+
+	for_each_online_pgdat(pgdat)
+		reset_node_managed_pages(pgdat);
+
+	reset_managed_pages_done = 1;
+}
+
+/**
+ * memblock_free_all - release free pages to the buddy allocator
+ *
+ * Return: the number of pages actually released.
+ */
+unsigned long __init memblock_free_all(void)
+{
+	unsigned long pages;
+
+	reset_all_zones_managed_pages();
+
+	pages = free_low_memory_core_early();
+	totalram_pages += pages;
+
+	return pages;
+}
+
 #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
 
 static int memblock_debug_show(struct seq_file *m, void *private)
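The freeing loop added above in __free_pages_memory() releases a PFN range to the buddy allocator in the largest naturally aligned power-of-two blocks that still fit before the end of the range. A small standalone userspace sketch of the same chunking (not kernel code; __builtin_ctzl() stands in for the kernel's __ffs(), and MAX_ORDER is assumed to be the common default of 11) shows how an example range gets split:

/* chunking_demo.c -- illustrative model of the __free_pages_memory() loop */
#include <stdio.h>

#define MAX_ORDER 11UL	/* assumption: default buddy allocator limit */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long start = 300, end = 512;	/* example PFN range (start != 0) */

	while (start < end) {
		/* largest power-of-two block naturally aligned at 'start' */
		unsigned long order = min_ul(MAX_ORDER - 1,
					     (unsigned long)__builtin_ctzl(start));

		/* shrink the block until it fits before 'end' */
		while (start + (1UL << order) > end)
			order--;

		printf("free pfn %lu..%lu (order %lu, %lu pages)\n",
		       start, start + (1UL << order) - 1, order, 1UL << order);

		start += 1UL << order;
	}
	return 0;
}

For pfns 300..511 this prints blocks of 4, 16, 64 and 128 pages, i.e. the same alignment-limited chunks the kernel loop would hand to the buddy allocator.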
mm/nobootmem.c (128 deletions, file removed)
@@ -1,128 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- *  bootmem - A boot-time physical memory allocator and configurator
- *
- *  Copyright (C) 1999 Ingo Molnar
- *                1999 Kanoj Sarcar, SGI
- *                2008 Johannes Weiner
- *
- * Access to this subsystem has to be serialized externally (which is true
- * for the boot process anyway).
- */
-#include <linux/init.h>
-#include <linux/pfn.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/kmemleak.h>
-#include <linux/range.h>
-#include <linux/memblock.h>
-#include <linux/bootmem.h>
-
-#include <asm/bug.h>
-#include <asm/io.h>
-
-#include "internal.h"
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data __refdata contig_page_data;
-EXPORT_SYMBOL(contig_page_data);
-#endif
-
-unsigned long max_low_pfn;
-unsigned long min_low_pfn;
-unsigned long max_pfn;
-unsigned long long max_possible_pfn;
-
-static void __init __free_pages_memory(unsigned long start, unsigned long end)
-{
-	int order;
-
-	while (start < end) {
-		order = min(MAX_ORDER - 1UL, __ffs(start));
-
-		while (start + (1UL << order) > end)
-			order--;
-
-		memblock_free_pages(pfn_to_page(start), start, order);
-
-		start += (1UL << order);
-	}
-}
-
-static unsigned long __init __free_memory_core(phys_addr_t start,
-				 phys_addr_t end)
-{
-	unsigned long start_pfn = PFN_UP(start);
-	unsigned long end_pfn = min_t(unsigned long,
-				      PFN_DOWN(end), max_low_pfn);
-
-	if (start_pfn >= end_pfn)
-		return 0;
-
-	__free_pages_memory(start_pfn, end_pfn);
-
-	return end_pfn - start_pfn;
-}
-
-static unsigned long __init free_low_memory_core_early(void)
-{
-	unsigned long count = 0;
-	phys_addr_t start, end;
-	u64 i;
-
-	memblock_clear_hotplug(0, -1);
-
-	for_each_reserved_mem_region(i, &start, &end)
-		reserve_bootmem_region(start, end);
-
-	/*
-	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
-	 * because in some case like Node0 doesn't have RAM installed
-	 * low ram will be on Node1
-	 */
-	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
-				NULL)
-		count += __free_memory_core(start, end);
-
-	return count;
-}
-
-static int reset_managed_pages_done __initdata;
-
-void reset_node_managed_pages(pg_data_t *pgdat)
-{
-	struct zone *z;
-
-	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
-		z->managed_pages = 0;
-}
-
-void __init reset_all_zones_managed_pages(void)
-{
-	struct pglist_data *pgdat;
-
-	if (reset_managed_pages_done)
-		return;
-
-	for_each_online_pgdat(pgdat)
-		reset_node_managed_pages(pgdat);
-
-	reset_managed_pages_done = 1;
-}
-
-/**
- * memblock_free_all - release free pages to the buddy allocator
- *
- * Return: the number of pages actually released.
- */
-unsigned long __init memblock_free_all(void)
-{
-	unsigned long pages;
-
-	reset_all_zones_managed_pages();
-
-	pages = free_low_memory_core_early();
-	totalram_pages += pages;
-
-	return pages;
-}
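The move is purely mechanical: the code deleted here is the same code added to mm/memblock.c above, so the boot-time freeing path still ends in memblock_free_all(), which architectures call from their mem_init() once memblock knows the final memory layout. A rough sketch of such a call site is below; everything other than memblock_free_all() and mem_init_print_info() is a placeholder for illustration, not code taken from any particular architecture:

/* Simplified, hypothetical arch mem_init() -- illustration only. */
void __init mem_init(void)
{
	/* arch-specific preparation: highmem setup, poisoning, etc. */

	/* hand all free memblock regions over to the buddy allocator */
	memblock_free_all();

	mem_init_print_info(NULL);
}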