powerpc/mm/highmem: Switch to generic kmap atomic

No reason to have the same code in every architecture.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/20201103095858.087635810@linutronix.de
Author: Thomas Gleixner
Date:   2020-11-03 10:27:27 +01:00
Parent: 5f037ea3b2
Commit: 47da42b27a
7 changed files with 8 additions and 92 deletions
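With KMAP_LOCAL selected, the per-CPU fixmap bookkeeping that used to live in the powerpc-specific highmem code is handled by the generic kmap_local implementation; the architecture only keeps the two small TLB-flush hooks added to asm/highmem.h below. Roughly, the mapping step works like this (a simplified sketch under that assumption, not the verbatim generic mm/highmem.c code; the helper name is made up, "idx" is the per-CPU fixmap slot):

#include <linux/highmem.h>
#include <asm/fixmap.h>

/*
 * Simplified sketch of the generic mapping step that replaces the removed
 * powerpc kmap_atomic_high_prot(); error handling and index bookkeeping
 * are elided.
 */
static void *kmap_local_slot_map(struct page *page, pgprot_t prot, int idx)
{
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	pte_t *ptep = virt_to_kpte(vaddr);
	pte_t pteval = mk_pte(page, prot);

	set_pte_at(&init_mm, vaddr, ptep, pteval);
	arch_kmap_local_post_map(vaddr, pteval);	/* powerpc: local TLB flush */

	return (void *)vaddr;
}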

arch/powerpc/Kconfig

@@ -409,6 +409,7 @@ menu "Kernel options"
 config HIGHMEM
 	bool "High memory support"
 	depends on PPC32
+	select KMAP_LOCAL

 source "kernel/Kconfig.hz"

arch/powerpc/include/asm/fixmap.h

@@ -20,7 +20,7 @@
 #include <asm/page.h>
 #ifdef CONFIG_HIGHMEM
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
 #endif
 #ifdef CONFIG_KASAN

@@ -55,7 +55,7 @@ enum fixed_addresses {
 	FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
-	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
 #endif
 #ifdef CONFIG_PPC_8xx
 	/* For IMMR we need an aligned 512K area */
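The fixmap change only resizes the reserved window: each possible CPU still owns a contiguous group of kmap slots, but the group size is now the generic KM_MAX_IDX rather than the old per-arch KM_TYPE_NR. A sketch of the slot-to-address arithmetic behind FIX_KMAP_BEGIN..FIX_KMAP_END (the helper name is hypothetical; the formula mirrors the old powerpc "type + KM_TYPE_NR * smp_processor_id()" calculation):

/* Per-CPU fixmap slot arithmetic; illustrative helper, not kernel API. */
static inline unsigned long kmap_idx_to_vaddr(int kmap_idx, int cpu)
{
	int idx = kmap_idx + KM_MAX_IDX * cpu;	/* slot within this CPU's group */

	return __fix_to_virt(FIX_KMAP_BEGIN + idx);
}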

arch/powerpc/include/asm/highmem.h

@@ -24,12 +24,10 @@
 #ifdef __KERNEL__
 #include <linux/interrupt.h>
-#include <asm/kmap_types.h>
 #include <asm/cacheflush.h>
 #include <asm/page.h>
 #include <asm/fixmap.h>
-extern pte_t *kmap_pte;
 extern pte_t *pkmap_page_table;
 /*

@@ -60,6 +58,11 @@ extern pte_t *pkmap_page_table;
 #define flush_cache_kmaps()	flush_cache_all()
+#define arch_kmap_local_post_map(vaddr, pteval)	\
+	local_flush_tlb_page(NULL, vaddr)
+
+#define arch_kmap_local_post_unmap(vaddr)	\
+	local_flush_tlb_page(NULL, vaddr)
 #endif /* __KERNEL__ */
 #endif /* _ASM_HIGHMEM_H */
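These two macros are the only arch-specific pieces left: the generic code installs and clears the fixmap PTE itself, then calls the hooks so powerpc can flush the local TLB entry. A rough sketch of the unmap side (again simplified and with a made-up helper name, not the verbatim mm/highmem.c):

/*
 * Simplified sketch of the generic unmap step that replaces the removed
 * kunmap_atomic_high(); debug checks and index pop are elided.
 */
static void kmap_local_slot_unmap(unsigned long vaddr)
{
	pte_t *ptep = virt_to_kpte(vaddr);

	pte_clear(&init_mm, vaddr, ptep);
	/* Arch hook from asm/highmem.h: local_flush_tlb_page(NULL, vaddr) on powerpc. */
	arch_kmap_local_post_unmap(vaddr);
}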

arch/powerpc/include/asm/kmap_types.h (deleted)

@@ -1,13 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_KMAP_TYPES_H
#define _ASM_POWERPC_KMAP_TYPES_H
#ifdef __KERNEL__
/*
*/
#define KM_TYPE_NR 16
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KMAP_TYPES_H */
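The deleted header only provided KM_TYPE_NR; the generic infrastructure gets the equivalent constant, KM_MAX_IDX, from asm-generic/kmap_size.h, so nothing powerpc-specific is lost. For reference, a sketch of the shape of that generic header (the exact debug-mode value is from memory and may differ):

/* Replacement for the per-arch KM_TYPE_NR (see include/asm-generic/kmap_size.h). */
#ifdef CONFIG_DEBUG_KMAP_LOCAL
# define KM_MAX_IDX	33	/* extra guard slots when kmap_local debugging is on */
#else
# define KM_MAX_IDX	16	/* same pool size as the old KM_TYPE_NR */
#endif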

arch/powerpc/mm/Makefile

@@ -16,7 +16,6 @@ obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
 obj-$(CONFIG_PPC_MM_SLICES) += slice.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
 obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o
 obj-$(CONFIG_PPC_PTDUMP) += ptdump/
 obj-$(CONFIG_KASAN) += kasan/

arch/powerpc/mm/highmem.c (deleted)

@@ -1,67 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* highmem.c: virtual kernel memory mappings for high memory
*
* PowerPC version, stolen from the i386 version.
*
* Used in CONFIG_HIGHMEM systems for memory pages which
* are not addressable by direct kernel virtual addresses.
*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
*
*
* Redesigned the x86 32-bit VM architecture to deal with
* up to 16 Terrabyte physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*
* Reworked for PowerPC by various contributors. Moved from
* highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
*/
#include <linux/highmem.h>
#include <linux/module.h>
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
int idx, type;
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
WARN_ON(IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !pte_none(*(kmap_pte - idx)));
__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
local_flush_tlb_page(NULL, vaddr);
return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);
void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
if (vaddr < __fix_to_virt(FIX_KMAP_END))
return;
if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM)) {
int type = kmap_atomic_idx();
unsigned int idx;
idx = type + KM_TYPE_NR * smp_processor_id();
WARN_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
/*
* force other mappings to Oops if they'll try to access
* this pte without first remap it
*/
pte_clear(&init_mm, vaddr, kmap_pte-idx);
local_flush_tlb_page(NULL, vaddr);
}
kmap_atomic_idx_pop();
}
EXPORT_SYMBOL(kunmap_atomic_high);
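Since kmap_atomic()/kunmap_atomic() are now routed through the generic kmap_local implementation, callers are unaffected by removing this file. A minimal, hypothetical caller for illustration:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical helper: copy out of a (possibly highmem) page. The
 * kmap_atomic() call is now serviced by the generic kmap_local code
 * instead of the removed powerpc-specific implementation.
 */
static void copy_out_of_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page);

	memcpy(dst, src, len);
	kunmap_atomic(src);
}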

arch/powerpc/mm/mem.c

@@ -61,11 +61,6 @@
 unsigned long long memory_limit;
 bool init_mem_is_free;
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-EXPORT_SYMBOL(kmap_pte);
-#endif
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			       unsigned long size, pgprot_t vma_prot)
 {

@@ -235,8 +230,6 @@ void __init paging_init(void)
 	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
 	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
-	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
 #endif /* CONFIG_HIGHMEM */
 	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",