arch/kmap_atomic: consolidate duplicate code
Every arch has the same code to ensure atomic operations and a check for
!HIGHMEM page.  Remove the duplicate code by defining a core kmap_atomic()
which only calls the arch specific kmap_atomic_high() when the page is high
memory.

[akpm@linux-foundation.org: coding style fixes]
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christian König <christian.koenig@amd.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20200507150004.1423069-7-ira.weiny@intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
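For illustration, here is a condensed sketch of the pattern this patch introduces, taken from the include/linux/highmem.h hunk at the end of this diff (the surrounding CONFIG_HIGHMEM plumbing is omitted, and the inline comments are editorial):

	/* Core wrapper: handles the bookkeeping every arch used to duplicate. */
	static inline void *kmap_atomic(struct page *page)
	{
		preempt_disable();		/* the mapping slot is per-CPU */
		pagefault_disable();		/* sleeping is not allowed while mapped */
		if (!PageHighMem(page))		/* lowmem is permanently mapped */
			return page_address(page);
		return kmap_atomic_high(page);	/* arch-specific highmem fixmap path */
	}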
commit 78b6d91ec7
parent ee9bc5fdf5
@@ -30,7 +30,6 @@
 
 #include <asm/cacheflush.h>
 
-extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 
 extern void kmap_init(void);
@@ -49,16 +49,11 @@
 extern pte_t * pkmap_page_table;
 static pte_t * fixmap_page_table;
 
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high(struct page *page)
 {
 	int idx, cpu_idx;
 	unsigned long vaddr;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	cpu_idx = kmap_atomic_idx_push();
 	idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
 	vaddr = FIXMAP_ADDR(idx);
@@ -68,7 +63,7 @@ void *kmap_atomic(struct page *page)
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high);
 
 void __kunmap_atomic(void *kv)
 {
@@ -60,7 +60,6 @@ static inline void *kmap_high_get(struct page *page)
  * when CONFIG_HIGHMEM is not set.
  */
 #ifdef CONFIG_HIGHMEM
-extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 #endif
@@ -31,18 +31,13 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr)
 	return *ptep;
 }
 
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
 	int type;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
 	 * There is no cache coherency issue when non VIVT, so force the
@@ -76,7 +71,7 @@ void *kmap_atomic(struct page *page)
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high);
 
 void __kunmap_atomic(void *kvaddr)
 {
@@ -32,7 +32,6 @@ extern pte_t *pkmap_page_table;
 
 #define ARCH_HAS_KMAP_FLUSH_TLB
 extern void kmap_flush_tlb(unsigned long addr);
-extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(void *ptr);
@@ -21,16 +21,11 @@ EXPORT_SYMBOL(kmap_flush_tlb);
 
 EXPORT_SYMBOL(kmap);
 
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high(struct page *page)
 {
 	unsigned long vaddr;
 	int idx, type;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -42,7 +37,7 @@ void *kmap_atomic(struct page *page)
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high);
 
 void __kunmap_atomic(void *kvaddr)
 {
@@ -63,9 +63,9 @@ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 }
 extern void __kunmap_atomic(void *kvaddr);
 
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic_high(struct page *page)
 {
-	return kmap_atomic_prot(page, kmap_prot);
+	return kmap_atomic_high_prot(page, kmap_prot);
 }
 
 #define flush_cache_kmaps()	{ flush_icache(); flush_dcache(); }
@@ -48,7 +48,6 @@ extern pte_t *pkmap_page_table;
 
 #define ARCH_HAS_KMAP_FLUSH_TLB
 extern void kmap_flush_tlb(unsigned long addr);
-extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 
@@ -14,9 +14,9 @@
 #include <linux/sched.h>
 #include <linux/syscalls.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
-#include <asm/highmem.h>
 #include <asm/processor.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
@@ -18,25 +18,11 @@ void kmap_flush_tlb(unsigned long addr)
 }
 EXPORT_SYMBOL(kmap_flush_tlb);
 
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap is is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high(struct page *page)
 {
 	unsigned long vaddr;
 	int idx, type;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -48,7 +34,7 @@ void *kmap_atomic(struct page *page)
 
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high);
 
 void __kunmap_atomic(void *kvaddr)
 {
@@ -51,7 +51,6 @@ extern void kmap_init(void);
  * when CONFIG_HIGHMEM is not set.
  */
 #ifdef CONFIG_HIGHMEM
-extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(void *ptr);
@@ -10,18 +10,13 @@
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
 
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr, pte;
 	int type;
 	pte_t *ptep;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();
 
 	idx = type + KM_TYPE_NR * smp_processor_id();
@@ -36,8 +31,7 @@ void *kmap_atomic(struct page *page)
 	__nds32__isb();
 	return (void *)vaddr;
 }
-
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high);
 
 void __kunmap_atomic(void *kvaddr)
 {
@@ -53,5 +47,4 @@ void __kunmap_atomic(void *kvaddr)
 	pagefault_enable();
 	preempt_enable();
 }
-
 EXPORT_SYMBOL(__kunmap_atomic);
@@ -71,9 +71,9 @@ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 }
 extern void __kunmap_atomic(void *kvaddr);
 
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic_high(struct page *page)
 {
-	return kmap_atomic_prot(page, kmap_prot);
+	return kmap_atomic_high_prot(page, kmap_prot);
 }
 
 
@@ -24,12 +24,6 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
 void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned long vaddr;
@@ -50,7 +50,6 @@ void kmap_init(void) __init;
 
 #define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))
 
-void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 
 #define flush_cache_kmaps()	flush_cache_all()
@@ -53,16 +53,11 @@ void __init kmap_init(void)
 	kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
 }
 
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high(struct page *page)
 {
 	unsigned long vaddr;
 	long idx, type;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -87,7 +82,7 @@ void *kmap_atomic(struct page *page)
 
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high);
 
 void __kunmap_atomic(void *kvaddr)
 {
@@ -68,7 +68,10 @@ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 
 	return kmap_atomic_high_prot(page, prot);
 }
-void *kmap_atomic(struct page *page);
+static inline void *kmap_atomic_high(struct page *page)
+{
+	return kmap_atomic_high_prot(page, kmap_prot);
+}
 void __kunmap_atomic(void *kvaddr);
 void *kmap_atomic_pfn(unsigned long pfn);
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
@@ -4,14 +4,6 @@
 #include <linux/swap.h> /* for totalram_pages */
 #include <linux/memblock.h>
 
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
 void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned long vaddr;
@@ -28,12 +20,6 @@ void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_high_prot);
 
-void *kmap_atomic(struct page *page)
-{
-	return kmap_atomic_prot(page, kmap_prot);
-}
-EXPORT_SYMBOL(kmap_atomic);
-
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
@@ -68,7 +68,6 @@ static inline void flush_cache_kmaps(void)
 	flush_cache_all();
 }
 
-void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 
 void kmap_init(void);
@@ -37,16 +37,11 @@ static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
 		color;
 }
 
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high(struct page *page)
 {
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	idx = kmap_idx(kmap_atomic_idx_push(),
 		       DCACHE_ALIAS(page_to_phys(page)));
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -57,7 +52,7 @@ void *kmap_atomic(struct page *page)
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high);
 
 void __kunmap_atomic(void *kvaddr)
 {
@@ -32,6 +32,7 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 #include <asm/kmap_types.h>
 
 #ifdef CONFIG_HIGHMEM
+extern void *kmap_atomic_high(struct page *page);
 #include <asm/highmem.h>
 
 #ifndef ARCH_HAS_KMAP_FLUSH_TLB
@@ -62,6 +63,28 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ *
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+static inline void *kmap_atomic(struct page *page)
+{
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_atomic_high(page);
+}
+
 /* declarations for linux/mm/highmem.c */
 unsigned int nr_free_highpages(void);
 extern atomic_long_t _totalhigh_pages;
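As a usage sketch (a hypothetical caller, not part of this patch): callers are unchanged by the consolidation and keep using the kmap_atomic()/kunmap_atomic() pair, where kunmap_atomic() is the existing macro wrapper around the __kunmap_atomic() seen above:

	/* Hypothetical caller, for illustration only. */
	static void copy_out_of_page(struct page *page, void *dst, size_t len)
	{
		void *vaddr = kmap_atomic(page);	/* must not sleep until unmapped */

		memcpy(dst, vaddr, len);
		kunmap_atomic(vaddr);			/* re-enables pagefaults and preemption */
	}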