[PATCH] catch valid mem range at onlining memory
This patch allows hot-adding memory that is not aligned to a section. Currently, hot-added memory has to be aligned to the section size, which is not useful on architectures with a large section size. When hot-added memory is registered as an iomem resource by the iomem resource patch, we can make use of that information to detect the valid memory range.

Note: with this, non-aligned memory can be registered. To allow hot-adding memory with holes, more work is needed around add_memory(), which does not allow adding memory to an already existing memory section.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 2842f11419
parent 0a54703904
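As an illustration of the idea (not part of the patch): /proc/iomem exposes the same iomem resource tree the kernel walks, so a small user-space program can list the "System RAM" ranges that overlap a candidate region, much as online_pages() below walks them via find_next_system_ram(). This is only a sketch; want_start and want_end are made-up example addresses, and reading real addresses from /proc/iomem typically requires root.

/*
 * Illustrative only -- not part of the patch. Lists "System RAM"
 * entries in /proc/iomem that overlap a hypothetical hot-added range.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* made-up example range; substitute the region you care about */
        uint64_t want_start = 0x100000000ULL;
        uint64_t want_end   = 0x13fffffffULL;
        char line[256];
        FILE *f = fopen("/proc/iomem", "r");

        if (!f) {
                perror("/proc/iomem");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                uint64_t start, end;

                /* entries look like "00100000-bfedffff : System RAM" */
                if (sscanf(line, "%" SCNx64 "-%" SCNx64, &start, &end) != 2)
                        continue;
                if (!strstr(line, "System RAM"))
                        continue;
                if (end < want_start || start > want_end)
                        continue;       /* no overlap with the requested range */
                printf("System RAM overlapping request: %#" PRIx64 "-%#" PRIx64 "\n",
                       start, end);
        }
        fclose(f);
        return 0;
}

In-kernel, the patch does the equivalent walk over iomem_resource.child under resource_lock instead of parsing text, as shown in the kernel/resource.c hunk below.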
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -105,6 +105,9 @@ extern int allocate_resource(struct resource *root, struct resource *new,
 int adjust_resource(struct resource *res, unsigned long start,
                     unsigned long size);
 
+/* get registered SYSTEM_RAM resources in the specified area */
+extern int find_next_system_ram(struct resource *res);
+
 /* Convenience shorthand with allocation */
 #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name))
 #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name))
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -232,6 +232,44 @@ int release_resource(struct resource *old)
 
 EXPORT_SYMBOL(release_resource);
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+/*
+ * Finds the lowest memory resource that exists within [res->start, res->end).
+ * The caller must set res->start, res->end and res->flags.
+ * If found, returns 0 and overwrites res; if not found, returns -1.
+ */
+int find_next_system_ram(struct resource *res)
+{
+        resource_size_t start, end;
+        struct resource *p;
+
+        BUG_ON(!res);
+
+        start = res->start;
+        end = res->end;
+
+        read_lock(&resource_lock);
+        for (p = iomem_resource.child; p; p = p->sibling) {
+                /* system RAM is just marked as IORESOURCE_MEM */
+                if (p->flags != res->flags)
+                        continue;
+                if (p->start > end) {
+                        p = NULL;
+                        break;
+                }
+                if (p->start >= start)
+                        break;
+        }
+        read_unlock(&resource_lock);
+        if (!p)
+                return -1;
+        /* copy data */
+        res->start = p->start;
+        res->end = p->end;
+        return 0;
+}
+#endif
+
 /*
  * Find empty slot in the resource tree given range and alignment.
  */
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -127,6 +127,9 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
         unsigned long i;
         unsigned long flags;
         unsigned long onlined_pages = 0;
+        struct resource res;
+        u64 section_end;
+        unsigned long start_pfn;
         struct zone *zone;
         int need_zonelists_rebuild = 0;
 
@@ -149,11 +152,28 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
         if (!populated_zone(zone))
                 need_zonelists_rebuild = 1;
 
-        for (i = 0; i < nr_pages; i++) {
-                struct page *page = pfn_to_page(pfn + i);
-                online_page(page);
-                onlined_pages++;
+        res.start = (u64)pfn << PAGE_SHIFT;
+        res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
+        res.flags = IORESOURCE_MEM; /* we just need system ram */
+        section_end = res.end;
+
+        while (find_next_system_ram(&res) >= 0) {
+                start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
+                nr_pages = (unsigned long)
+                           ((res.end + 1 - res.start) >> PAGE_SHIFT);
+
+                if (PageReserved(pfn_to_page(start_pfn))) {
+                        /* this region's pages are not onlined yet */
+                        for (i = 0; i < nr_pages; i++) {
+                                struct page *page = pfn_to_page(start_pfn + i);
+                                online_page(page);
+                                onlined_pages++;
+                        }
+                }
+
+                res.start = res.end + 1;
+                res.end = section_end;
         }
         zone->present_pages += onlined_pages;
         zone->zone_pgdat->node_present_pages += onlined_pages;
 