From 712cd386fdc983d318fecf302a2a9cb8e9de90c9 Mon Sep 17 00:00:00 2001
From: Tang Chen
Date: Tue, 11 Dec 2012 16:01:07 -0800
Subject: [PATCH] mm/memory_hotplug.c: update start_pfn in zone and pg_data
 when spanned_pages == 0.

If we hot-remove memory only and leave the CPUs alive, the corresponding
node is not removed, but node_start_pfn and node_spanned_pages in its
pg_data are reset to 0.

In this case, when we hot-add the memory back later, node_start_pfn
stays 0 because no pfn is less than 0.  If we then hot-remove the memory
again, the kernel panics in find_biggest_section_pfn() when it tries to
scan all the pfns.

The zone has the same problem.

This patch sets start_pfn to the start_pfn of the section being added
when spanned_pages of the zone or pg_data is 0.

---How to reproduce---

1. hot-add a container with some memory and CPUs;
2. hot-remove the container's memory, and leave the CPUs there;
3. hot-add the memory again;
4. hot-remove it again;

then, the kernel will panic.

---Call trace---

  BUG: unable to handle kernel paging request at 00000fff82a8cc38
  IP: [] find_biggest_section_pfn+0xe5/0x180
  ......
  Call Trace:
   [] __remove_zone+0x184/0x1b0
   [] __remove_section+0x8c/0xb0
   [] __remove_pages+0xe7/0x120
   [] arch_remove_memory+0x2c/0x80
   [] remove_memory+0x56/0x90
   [] acpi_memory_device_remove_memory+0x48/0x73
   [] acpi_memory_device_notify+0x153/0x274
   [] acpi_ev_notify_dispatch+0x41/0x5f
   [] acpi_os_execute_deferred+0x27/0x34
   [] process_one_work+0x219/0x680
   [] worker_thread+0x12e/0x320
   [] kthread+0xc6/0xd0
   [] kernel_thread_helper+0x4/0x10
  ......
  ---[ end trace 96d845dbf33fee11 ]---

Signed-off-by: Tang Chen
Cc: Yasuaki Ishimatsu
Cc: Wen Congyang
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory_hotplug.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 72195602ded5..571130ee66d7 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -205,7 +205,7 @@ static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
 	zone_span_writelock(zone);

 	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-	if (start_pfn < zone->zone_start_pfn)
+	if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn)
 		zone->zone_start_pfn = start_pfn;

 	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
@@ -220,7 +220,7 @@ static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
 	unsigned long old_pgdat_end_pfn =
 		pgdat->node_start_pfn + pgdat->node_spanned_pages;

-	if (start_pfn < pgdat->node_start_pfn)
+	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
 		pgdat->node_start_pfn = start_pfn;

 	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
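
Note (illustration only, not part of the patch): the user-space sketch below
models the zone case with a stand-in struct and made-up pfn values to show why
the extra !spanned_pages test matters once a zone has been emptied (both
zone_start_pfn and spanned_pages reset to 0).  The names fake_zone and
grow_span and the "fixed" toggle are hypothetical; only the condition mirrors
the kernel code being patched.

#include <stdio.h>

/* Stand-in for the relevant fields of struct zone (illustration only). */
struct fake_zone {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
};

#define max(a, b) ((a) > (b) ? (a) : (b))

/* Mirrors the span-growing logic; 'fixed' selects the patched condition. */
static void grow_span(struct fake_zone *z, unsigned long start_pfn,
		      unsigned long end_pfn, int fixed)
{
	unsigned long old_end_pfn = z->zone_start_pfn + z->spanned_pages;

	if ((fixed && !z->spanned_pages) || start_pfn < z->zone_start_pfn)
		z->zone_start_pfn = start_pfn;

	z->spanned_pages = max(old_end_pfn, end_pfn) - z->zone_start_pfn;
}

int main(void)
{
	/* The zone was hot-removed earlier: both fields were reset to 0. */
	struct fake_zone unpatched = { 0, 0 }, patched = { 0, 0 };

	/* Hot-add a section spanning pfns 0x100000..0x108000 (made-up values). */
	grow_span(&unpatched, 0x100000, 0x108000, 0);	/* old condition */
	grow_span(&patched,   0x100000, 0x108000, 1);	/* patched condition */

	/* Old condition: start_pfn stays 0, so the zone appears to span
	 * pfn 0..0x108000, and a later remove walks pfns it never owned. */
	printf("unpatched: start=%#lx spanned=%#lx\n",
	       unpatched.zone_start_pfn, unpatched.spanned_pages);
	/* Patched condition: start_pfn becomes 0x100000, spanned 0x8000. */
	printf("patched:   start=%#lx spanned=%#lx\n",
	       patched.zone_start_pfn, patched.spanned_pages);
	return 0;
}

With the unpatched condition the emptied zone keeps start_pfn == 0 and seems
to span everything up to the new section's end; with the patched condition it
spans exactly the section that was added.  The pg_data case is analogous.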