mm: remove map_vm_area
Switch all callers to map_kernel_range, which is symmetric to the unmap
side (as well as the _noflush versions).

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: Gao Xiang <xiang@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Kelley <mikelley@microsoft.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Wei Liu <wei.liu@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200414131348.444715-17-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit ed1f324c5f
parent 60bb44652a
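For reference, the caller-side conversion follows one pattern throughout. Here is a minimal sketch distilled from the vmap() hunk below; note that map_kernel_range() returns the number of pages mapped on success, so converted callers test for a negative return rather than any non-zero value:

	/* Before: map_vm_area() took the vm_struct and returned 0 or -errno. */
	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	/*
	 * After: pass the start address and size explicitly; only a
	 * negative return indicates failure.
	 */
	if (map_kernel_range((unsigned long)area->addr, size, prot,
			pages) < 0) {
		vunmap(area->addr);
		return NULL;
	}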
--- a/Documentation/core-api/cachetlb.rst
+++ b/Documentation/core-api/cachetlb.rst
@@ -213,7 +213,7 @@ Here are the routines, one by one:
 	there will be no entries in the cache for the kernel address
 	space for virtual addresses in the range 'start' to 'end-1'.
 
-	The first of these two routines is invoked after map_vm_area()
+	The first of these two routines is invoked after map_kernel_range()
 	has installed the page table entries.  The second is invoked
 	before unmap_kernel_range() deletes the page table entries.
 
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -168,11 +168,11 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size,
 extern struct vm_struct *remove_vm_area(const void *addr);
 extern struct vm_struct *find_vm_area(const void *addr);
 
-extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
-			struct page **pages);
 #ifdef CONFIG_MMU
 extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
 				    pgprot_t prot, struct page **pages);
+int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
+		struct page **pages);
 extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 static inline void set_vm_flush_reset_perms(void *addr)
@@ -189,14 +189,12 @@ map_kernel_range_noflush(unsigned long start, unsigned long size,
 {
 	return size >> PAGE_SHIFT;
 }
+#define map_kernel_range map_kernel_range_noflush
 static inline void
 unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
 {
 }
-static inline void
-unmap_kernel_range(unsigned long addr, unsigned long size)
-{
-}
+#define unmap_kernel_range unmap_kernel_range_noflush
 static inline void set_vm_flush_reset_perms(void *addr)
 {
 }
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -273,8 +273,8 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size,
 	return 0;
 }
 
-static int map_kernel_range(unsigned long start, unsigned long size,
-			   pgprot_t prot, struct page **pages)
+int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
+		struct page **pages)
 {
 	int ret;
 
@@ -2028,16 +2028,6 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
 	flush_tlb_kernel_range(addr, end);
 }
 
-int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
-{
-	unsigned long addr = (unsigned long)area->addr;
-	int err;
-
-	err = map_kernel_range(addr, get_vm_area_size(area), prot, pages);
-
-	return err > 0 ? 0 : err;
-}
-
 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
 	struct vmap_area *va, unsigned long flags, const void *caller)
 {
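The removed wrapper above also documents the new return convention: map_kernel_range() returns the number of pages mapped on success (see the !CONFIG_MMU stub returning size >> PAGE_SHIFT) and a negative errno on failure. A caller that still wanted map_vm_area()'s 0/-errno semantics could open-code it as in this hedged sketch; my_map_vm_area is a hypothetical name, not part of this commit:

	static int my_map_vm_area(struct vm_struct *area, pgprot_t prot,
				  struct page **pages)
	{
		int err = map_kernel_range((unsigned long)area->addr,
					   get_vm_area_size(area), prot, pages);

		/* A positive return is a page count, i.e. success. */
		return err > 0 ? 0 : err;
	}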
@@ -2409,7 +2399,8 @@ void *vmap(struct page **pages, unsigned int count,
 	if (!area)
 		return NULL;
 
-	if (map_vm_area(area, prot, pages)) {
+	if (map_kernel_range((unsigned long)area->addr, size, prot,
+			pages) < 0) {
 		vunmap(area->addr);
 		return NULL;
 	}
@@ -2472,8 +2463,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	}
 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
 
-	if (map_vm_area(area, prot, pages))
+	if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area),
+			prot, pages) < 0)
 		goto fail;
+
 	return area->addr;
 
 fail:
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1138,7 +1138,9 @@ static inline void __zs_cpu_down(struct mapping_area *area)
 static inline void *__zs_map_object(struct mapping_area *area,
 				struct page *pages[2], int off, int size)
 {
-	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
+	unsigned long addr = (unsigned long)area->vm->addr;
+
+	BUG_ON(map_kernel_range(addr, PAGE_SIZE * 2, PAGE_KERNEL, pages) < 0);
 	area->vm_addr = area->vm->addr;
 	return area->vm_addr + off;
 }
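The fixed PAGE_SIZE * 2 length is safe here because __zs_map_object() handles objects that may straddle a page boundary, hence the pages[2] array. A hedged sketch of the map/use/unmap pairing this code relies on, using only functions named in this patch:

	unsigned long addr = (unsigned long)area->vm->addr;

	/* Map both backing pages contiguously into the per-CPU vm area. */
	BUG_ON(map_kernel_range(addr, PAGE_SIZE * 2, PAGE_KERNEL, pages) < 0);
	/* ... access the object through addr + off ... */
	unmap_kernel_range(addr, PAGE_SIZE * 2);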
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -190,8 +190,7 @@ EXPORT_SYMBOL(ceph_compare_options);
  * kvmalloc() doesn't fall back to the vmalloc allocator unless flags are
  * compatible with (a superset of) GFP_KERNEL.  This is because while the
  * actual pages are allocated with the specified flags, the page table pages
- * are always allocated with GFP_KERNEL.  map_vm_area() doesn't even take
- * flags because GFP_KERNEL is hard-coded in {p4d,pud,pmd,pte}_alloc().
+ * are always allocated with GFP_KERNEL.
  *
  * ceph_kvmalloc() may be called with GFP_KERNEL, GFP_NOFS or GFP_NOIO.
  */