mm/memory_hotplug: move and simplify walk_memory_blocks()
Let's move walk_memory_blocks() to the place where memory block logic resides and simplify it. While at it, add a type for the callback function. Link: http://lkml.kernel.org/r/20190614100114.311-6-david@redhat.com Signed-off-by: David Hildenbrand <david@redhat.com> Reviewed-by: Andrew Morton <akpm@linux-foundation.org> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: "Rafael J. Wysocki" <rafael@kernel.org> Cc: David Hildenbrand <david@redhat.com> Cc: Stephen Rothwell <sfr@canb.auug.org.au> Cc: Pavel Tatashin <pasha.tatashin@soleen.com> Cc: Andrew Banman <andrew.banman@hpe.com> Cc: Mike Travis <mike.travis@hpe.com> Cc: Oscar Salvador <osalvador@suse.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Wei Yang <richard.weiyang@gmail.com> Cc: Arun KS <arunks@codeaurora.org> Cc: Qian Cai <cai@lca.pw> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
fbcf73ce65
commit
ea8846411a
|
@ -44,6 +44,11 @@ static inline unsigned long pfn_to_block_id(unsigned long pfn)
|
|||
return base_memory_block_id(pfn_to_section_nr(pfn));
|
||||
}
|
||||
|
||||
/* Translate a physical address into the id of the memory block holding it. */
static inline unsigned long phys_to_block_id(unsigned long phys)
{
	const unsigned long pfn = PFN_DOWN(phys);

	return pfn_to_block_id(pfn);
}
|
||||
|
||||
static int memory_subsys_online(struct device *dev);
|
||||
static int memory_subsys_offline(struct device *dev);
|
||||
|
||||
|
@ -851,3 +856,40 @@ out:
|
|||
printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* walk_memory_blocks - walk through all present memory blocks overlapped
|
||||
* by the range [start, start + size)
|
||||
*
|
||||
* @start: start address of the memory range
|
||||
* @size: size of the memory range
|
||||
* @arg: argument passed to func
|
||||
* @func: callback for each memory section walked
|
||||
*
|
||||
* This function walks through all present memory blocks overlapped by the
|
||||
* range [start, start + size), calling func on each memory block.
|
||||
*
|
||||
* In case func() returns an error, walking is aborted and the error is
|
||||
* returned.
|
||||
*/
|
||||
int walk_memory_blocks(unsigned long start, unsigned long size,
|
||||
void *arg, walk_memory_blocks_func_t func)
|
||||
{
|
||||
const unsigned long start_block_id = phys_to_block_id(start);
|
||||
const unsigned long end_block_id = phys_to_block_id(start + size - 1);
|
||||
struct memory_block *mem;
|
||||
unsigned long block_id;
|
||||
int ret = 0;
|
||||
|
||||
for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
|
||||
mem = find_memory_block_by_id(block_id, NULL);
|
||||
if (!mem)
|
||||
continue;
|
||||
|
||||
ret = func(mem, arg);
|
||||
put_device(&mem->dev);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -119,6 +119,9 @@ extern int memory_isolate_notify(unsigned long val, void *v);
|
|||
extern struct memory_block *find_memory_block_hinted(struct mem_section *,
|
||||
struct memory_block *);
|
||||
extern struct memory_block *find_memory_block(struct mem_section *);
|
||||
typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
|
||||
extern int walk_memory_blocks(unsigned long start, unsigned long size,
|
||||
void *arg, walk_memory_blocks_func_t func);
|
||||
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
|
||||
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
|
||||
|
||||
|
|
|
@ -340,8 +340,6 @@ static inline void __remove_memory(int nid, u64 start, u64 size) {}
|
|||
#endif /* CONFIG_MEMORY_HOTREMOVE */
|
||||
|
||||
extern void __ref free_area_init_core_hotplug(int nid);
|
||||
extern int walk_memory_blocks(unsigned long start, unsigned long size,
|
||||
void *arg, int (*func)(struct memory_block *, void *));
|
||||
extern int __add_memory(int nid, u64 start, u64 size);
|
||||
extern int add_memory(int nid, u64 start, u64 size);
|
||||
extern int add_memory_resource(int nid, struct resource *resource);
|
||||
|
|
|
@ -1659,62 +1659,7 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
|
|||
{
|
||||
return __offline_pages(start_pfn, start_pfn + nr_pages);
|
||||
}
|
||||
#endif /* CONFIG_MEMORY_HOTREMOVE */
|
||||
|
||||
/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * Returns the return value of func.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, int (*func)(struct memory_block *, void *))
{
	const unsigned long start_pfn = PFN_DOWN(start);
	/* PFN_UP so a range ending mid-page still covers its last section. */
	const unsigned long end_pfn = PFN_UP(start + size - 1);
	struct memory_block *mem = NULL;
	struct mem_section *section;
	unsigned long pfn, section_nr;
	int ret;

	/* Step section-by-section; skip holes with no present memory. */
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		section_nr = pfn_to_section_nr(pfn);
		if (!present_section_nr(section_nr))
			continue;

		section = __nr_to_section(section_nr);
		/* same memblock? */
		if (mem)
			if ((section_nr >= mem->start_section_nr) &&
			    (section_nr <= mem->end_section_nr))
				continue;

		/*
		 * Look up the block for this section, using the previous
		 * block as a hint. NOTE(review): this appears to hand off
		 * the reference on the hint — only the newly returned
		 * block's reference is dropped below; confirm against
		 * find_memory_block_hinted().
		 */
		mem = find_memory_block_hinted(section, mem);
		if (!mem)
			continue;

		ret = func(mem, arg);
		if (ret) {
			/* Drop the held reference before aborting the walk. */
			kobject_put(&mem->dev.kobj);
			return ret;
		}
	}

	/* Release the reference on the last block visited, if any. */
	if (mem)
		kobject_put(&mem->dev.kobj);

	return 0;
}
|
||||
|
||||
#ifdef CONFIG_MEMORY_HOTREMOVE
|
||||
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
|
||||
{
|
||||
int ret = !is_memblock_offlined(mem);
|
||||
|
|
Loading…
Reference in New Issue