mm/vmalloc.c: clean up map_vm_area third argument

Currently map_vm_area() takes (struct page ***pages) as its third
argument, and after mapping it advances (*pages) to point to (*pages +
nr_mapped_pages).

This increment is useless to the callers these days: none of them cares
where the pointer ends up after mapping, and several of them actively
dodge the side effect by handing map_vm_area() a disposable copy of the
pointer.

A caller can always guarantee that all of its pages fit into the
vm_area given as the first argument, so the only thing it cares about
is whether map_vm_area() succeeds or fails.

This patch drops the pointer movement from map_vm_area(), changes its
third argument to a plain (struct page **pages), and updates the
callers accordingly.
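
For illustration, here is a minimal userspace sketch of the calling
convention before and after this patch (hypothetical code: 'struct
page' is faked, and old_map()/new_map() are invented stand-ins for the
old and new map_vm_area() signatures, not the kernel implementation):

    #include <stdio.h>

    struct page { int pfn; };   /* stand-in for the kernel's struct page */

    /* Old convention: advances *pages past the mapped entries. */
    static int old_map(struct page ***pages, int nr)
    {
            *pages += nr;       /* the side effect callers had to dodge */
            return 0;
    }

    /* New convention: the array is only read, never moved. */
    static int new_map(struct page **pages, int nr)
    {
            (void)pages;
            (void)nr;
            return 0;
    }

    int main(void)
    {
            struct page p0 = { 0 }, p1 = { 1 };
            struct page *array[2] = { &p0, &p1 };

            /* Old API: hand over a throwaway copy (cf. binder's
             * page_array_ptr and lguest's pagep) so 'array' survives. */
            struct page **copy = array;
            old_map(&copy, 2);
            printf("copy advanced by %td entries\n", copy - array);

            /* New API: pass the array directly. */
            new_map(array, 2);
            return 0;
    }

The throwaway copy mirrors what the binder and lguest callers did
before this patch; with the new signature those extra locals simply
disappear, as the hunks below show.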

Signed-off-by: WANG Chao <chaowang@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit f6f8ed4735
parent 21bda264f4
Author:    WANG Chao, 2014-08-06 16:06:58 -07:00
Committer: Linus Torvalds

6 changed files with 11 additions and 20 deletions

@@ -58,7 +58,7 @@ void *module_alloc(unsigned long size)
 	area->nr_pages = npages;
 	area->pages = pages;
-	if (map_vm_area(area, prot_rwx, &pages)) {
+	if (map_vm_area(area, prot_rwx, pages)) {
 		vunmap(area->addr);
 		goto error;
 	}

@@ -42,7 +42,6 @@ DEFINE_MUTEX(lguest_lock);
 static __init int map_switcher(void)
 {
 	int i, err;
-	struct page **pagep;
 
 	/*
	 * Map the Switcher in to high memory.
@@ -110,11 +109,9 @@ static __init int map_switcher(void)
 	 * This code actually sets up the pages we've allocated to appear at
 	 * switcher_addr. map_vm_area() takes the vma we allocated above, the
 	 * kind of pages we're mapping (kernel pages), and a pointer to our
-	 * array of struct pages.  It increments that pointer, but we don't
-	 * care.
+	 * array of struct pages.
 	 */
-	pagep = lg_switcher_pages;
-	err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
+	err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, lg_switcher_pages);
 	if (err) {
 		printk("lguest: map_vm_area failed: %i\n", err);
 		goto free_vma;

@@ -585,7 +585,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 		int ret;
-		struct page **page_array_ptr;
 
 		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
@@ -598,8 +597,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 		}
 		tmp_area.addr = page_addr;
 		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
-		page_array_ptr = page;
-		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+		ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
 		if (ret) {
 			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
 			       proc->pid, page_addr);

@@ -113,7 +113,7 @@ extern struct vm_struct *remove_vm_area(const void *addr);
 extern struct vm_struct *find_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
-			struct page ***pages);
+			struct page **pages);
 #ifdef CONFIG_MMU
 extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
 				    pgprot_t prot, struct page **pages);

@@ -1270,19 +1270,15 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(unmap_kernel_range);
 
-int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
+int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
 {
 	unsigned long addr = (unsigned long)area->addr;
 	unsigned long end = addr + get_vm_area_size(area);
 	int err;
 
-	err = vmap_page_range(addr, end, prot, *pages);
-	if (err > 0) {
-		*pages += err;
-		err = 0;
-	}
-
-	return err;
+	err = vmap_page_range(addr, end, prot, pages);
+
+	return err > 0 ? 0 : err;
 }
 EXPORT_SYMBOL_GPL(map_vm_area);
 
@@ -1548,7 +1544,7 @@ void *vmap(struct page **pages, unsigned int count,
 	if (!area)
 		return NULL;
 
-	if (map_vm_area(area, prot, &pages)) {
+	if (map_vm_area(area, prot, pages)) {
 		vunmap(area->addr);
 		return NULL;
 	}
@@ -1606,7 +1602,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		cond_resched();
 	}
 
-	if (map_vm_area(area, prot, &pages))
+	if (map_vm_area(area, prot, pages))
 		goto fail;
 
 	return area->addr;

@@ -690,7 +690,7 @@ static inline void __zs_cpu_down(struct mapping_area *area)
 static inline void *__zs_map_object(struct mapping_area *area,
 				struct page *pages[2], int off, int size)
 {
-	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
+	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
 	area->vm_addr = area->vm->addr;
 	return area->vm_addr + off;
 }