x86, mm: Separate x86_64 vmalloc_sync_all() into separate functions
No behavior change. Move some of the vmalloc_sync_all() code into a new
function sync_global_pgds() that will be useful for memory hotplug.

Signed-off-by: Haicheng Li <haicheng.li@linux.intel.com>
LKML-Reference: <4C6E4ECD.1090607@linux.intel.com>
Reviewed-by: Wu Fengguang <fengguang.wu@intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
commit 6afb5157b9
parent 61c77326d1
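For context on where the new helper is headed, here is a minimal sketch of the intended memory-hotplug call pattern. The caller name and trigger point are hypothetical (the hotplug wiring is not part of this commit); only sync_global_pgds() and PGDIR_MASK come from the code below.

/*
 * Hypothetical caller, illustration only: after new memory is onlined
 * and init_mm's kernel page tables cover it, propagate any fresh PGD
 * entries to every process's PGD.
 */
static void example_after_memory_add(unsigned long start, unsigned long end)
{
	/* Round down to a PGD boundary, as vmalloc_sync_all() does below. */
	sync_global_pgds(start & PGDIR_MASK, end);
}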
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -102,6 +102,8 @@ static inline void native_pgd_clear(pgd_t *pgd)
 	native_set_pgd(pgd, native_make_pgd(0));
 }
 
+extern void sync_global_pgds(unsigned long start, unsigned long end);
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -326,29 +326,7 @@ out:
 
 void vmalloc_sync_all(void)
 {
-	unsigned long address;
-
-	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
-	     address += PGDIR_SIZE) {
-
-		const pgd_t *pgd_ref = pgd_offset_k(address);
-		unsigned long flags;
-		struct page *page;
-
-		if (pgd_none(*pgd_ref))
-			continue;
-
-		spin_lock_irqsave(&pgd_lock, flags);
-		list_for_each_entry(page, &pgd_list, lru) {
-			pgd_t *pgd;
-			pgd = (pgd_t *)page_address(page) + pgd_index(address);
-			if (pgd_none(*pgd))
-				set_pgd(pgd, *pgd_ref);
-			else
-				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
-	}
+	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 }
 
 /*
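The PGDIR_SIZE stride in both the old and new loops works because one PGD entry maps an entire PGDIR_SIZE-aligned region (2^39 bytes, i.e. 512 GB, with the 4-level x86_64 paging of this era). Below is a standalone userspace sketch of the index arithmetic; the constants are hard-coded assumptions, not taken from kernel headers.

#include <stdio.h>

/* Assumed x86_64 4-level paging constants (illustrative). */
#define PGDIR_SHIFT	39
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PTRS_PER_PGD	512

/* Same arithmetic as the kernel's pgd_index(): top 9 translated bits. */
static unsigned long pgd_index(unsigned long address)
{
	return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

int main(void)
{
	/* VMALLOC_START on x86_64 of this era; illustrative value. */
	unsigned long start = 0xffffc90000000000UL;
	unsigned long address;

	/* Each PGDIR_SIZE step lands in the next PGD slot. */
	for (address = start; address < start + 3 * PGDIR_SIZE; address += PGDIR_SIZE)
		printf("%#lx -> pgd slot %lu\n", address, pgd_index(address));
	return 0;
}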
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -97,6 +97,36 @@ static int __init nonx32_setup(char *str)
 }
 __setup("noexec32=", nonx32_setup);
 
+/*
+ * When memory was added/removed make sure all the processes MM have
+ * suitable PGD entries in the local PGD level page.
+ */
+void sync_global_pgds(unsigned long start, unsigned long end)
+{
+	unsigned long address;
+
+	for (address = start; address <= end; address += PGDIR_SIZE) {
+		const pgd_t *pgd_ref = pgd_offset_k(address);
+		unsigned long flags;
+		struct page *page;
+
+		if (pgd_none(*pgd_ref))
+			continue;
+
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+			else
+				BUG_ON(pgd_page_vaddr(*pgd)
+				       != pgd_page_vaddr(*pgd_ref));
+		}
+		spin_unlock_irqrestore(&pgd_lock, flags);
+	}
+}
+
 /*
  * NOTE: This function is marked __ref because it calls __init function
  * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
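The copy-or-check step inside the list walk is the heart of the new function: a per-process kernel PGD slot is either still empty (and gets filled from the init_mm reference) or it must already agree with the reference. A plain C stand-in for that invariant, with simplified types that are not the kernel's:

#include <assert.h>

typedef unsigned long pgd_t;	/* simplified stand-in, not the kernel type */

/* Mirrors the set_pgd()/BUG_ON() branch of sync_global_pgds(). */
static void sync_slot(pgd_t *slot, const pgd_t *ref)
{
	if (*slot == 0)
		*slot = *ref;		/* missing entry: copy from init_mm */
	else
		assert(*slot == *ref);	/* present entry must already match */
}

int main(void)
{
	pgd_t ref = 0xabc, empty = 0, same = 0xabc;

	sync_slot(&empty, &ref);	/* fills the empty slot */
	sync_slot(&same, &ref);		/* passes the consistency check */
	return 0;
}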