mm: pass addr as unsigned long to vb_free
Every use of addr in vb_free casts to unsigned long first, and the caller has an unsigned long version of the address available anyway. Just pass that and avoid all the casts. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Christian Borntraeger <borntraeger@de.ibm.com> Cc: Christophe Leroy <christophe.leroy@c-s.fr> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Cc: David Airlie <airlied@linux.ie> Cc: Gao Xiang <xiang@kernel.org> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haiyang Zhang <haiyangz@microsoft.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: "K. Y. Srinivasan" <kys@microsoft.com> Cc: Laura Abbott <labbott@redhat.com> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Michael Kelley <mikelley@microsoft.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Nitin Gupta <ngupta@vflare.org> Cc: Robin Murphy <robin.murphy@arm.com> Cc: Sakari Ailus <sakari.ailus@linux.intel.com> Cc: Stephen Hemminger <sthemmin@microsoft.com> Cc: Sumit Semwal <sumit.semwal@linaro.org> Cc: Wei Liu <wei.liu@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Paul Mackerras <paulus@ozlabs.org> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Will Deacon <will@kernel.org> Link: http://lkml.kernel.org/r/20200414131348.444715-13-hch@lst.de Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
b607e6d17d
commit
78a0e8c483
16
mm/vmalloc.c
16
mm/vmalloc.c
|
@ -1665,7 +1665,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
|
|||
return vaddr;
|
||||
}
|
||||
|
||||
static void vb_free(const void *addr, unsigned long size)
|
||||
static void vb_free(unsigned long addr, unsigned long size)
|
||||
{
|
||||
unsigned long offset;
|
||||
unsigned long vb_idx;
|
||||
|
@ -1675,24 +1675,22 @@ static void vb_free(const void *addr, unsigned long size)
|
|||
BUG_ON(offset_in_page(size));
|
||||
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
|
||||
|
||||
flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
|
||||
flush_cache_vunmap(addr, addr + size);
|
||||
|
||||
order = get_order(size);
|
||||
|
||||
offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
|
||||
offset >>= PAGE_SHIFT;
|
||||
offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
|
||||
|
||||
vb_idx = addr_to_vb_idx((unsigned long)addr);
|
||||
vb_idx = addr_to_vb_idx(addr);
|
||||
rcu_read_lock();
|
||||
vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
|
||||
rcu_read_unlock();
|
||||
BUG_ON(!vb);
|
||||
|
||||
vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
|
||||
vunmap_page_range(addr, addr + size);
|
||||
|
||||
if (debug_pagealloc_enabled_static())
|
||||
flush_tlb_kernel_range((unsigned long)addr,
|
||||
(unsigned long)addr + size);
|
||||
flush_tlb_kernel_range(addr, addr + size);
|
||||
|
||||
spin_lock(&vb->lock);
|
||||
|
||||
|
@ -1792,7 +1790,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
|
|||
|
||||
if (likely(count <= VMAP_MAX_ALLOC)) {
|
||||
debug_check_no_locks_freed(mem, size);
|
||||
vb_free(mem, size);
|
||||
vb_free(addr, size);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue