2009-05-26 22:30:15 +08:00
|
|
|
/*
|
|
|
|
* This file contains the routines setting up the linux page tables.
|
|
|
|
*
|
|
|
|
* Copyright (C) 2008 Michal Simek
|
|
|
|
* Copyright (C) 2008 PetaLogix
|
|
|
|
*
|
|
|
|
* Copyright (C) 2007 Xilinx, Inc. All rights reserved.
|
|
|
|
*
|
|
|
|
* Derived from arch/ppc/mm/pgtable.c:
|
|
|
|
* -- paulus
|
|
|
|
*
|
|
|
|
* Derived from arch/ppc/mm/init.c:
|
|
|
|
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
|
|
|
|
*
|
|
|
|
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
|
|
|
|
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
|
|
|
|
* Copyright (C) 1996 Paul Mackerras
|
|
|
|
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
|
|
|
|
*
|
|
|
|
* Derived from "arch/i386/mm/init.c"
|
|
|
|
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
|
|
|
|
*
|
|
|
|
* This file is subject to the terms and conditions of the GNU General
|
|
|
|
* Public License. See the file COPYING in the main directory of this
|
|
|
|
* archive for more details.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2013-02-01 20:10:35 +08:00
|
|
|
#include <linux/export.h>
|
2009-05-26 22:30:15 +08:00
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/vmalloc.h>
|
|
|
|
#include <linux/init.h>
|
2017-02-04 07:16:44 +08:00
|
|
|
#include <linux/mm_types.h>
|
2009-05-26 22:30:15 +08:00
|
|
|
|
|
|
|
#include <asm/pgtable.h>
|
|
|
|
#include <asm/pgalloc.h>
|
|
|
|
#include <linux/io.h>
|
|
|
|
#include <asm/mmu.h>
|
|
|
|
#include <asm/sections.h>
|
2011-12-15 21:33:32 +08:00
|
|
|
#include <asm/fixmap.h>
|
2009-05-26 22:30:15 +08:00
|
|
|
|
|
|
|
/* Top of the early ioremap window — presumably initialized during MMU
 * bring-up elsewhere in the arch code; TODO confirm against mm/init.c. */
unsigned long ioremap_base;
/* Next free virtual address for pre-vmalloc ioremap allocations; grows
 * downward from ioremap_base (see the `ioremap_bot -= size` in __ioremap). */
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);
|
2009-05-26 22:30:15 +08:00
|
|
|
|
|
|
|
/*
 * Map the physical range [addr, addr + size) into kernel virtual space
 * with the given PTE @flags.
 *
 * Before mem_init() has run, virtual space is carved out by moving
 * ioremap_bot downward; afterwards the vmalloc allocator (get_vm_area)
 * is used.  Returns the mapped virtual address with the sub-page offset
 * of @addr restored, or NULL on failure.
 */
static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	/* Round the span up to whole pages before page-aligning the base. */
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD
	 *
	 * NOTE(review): the second half of this test compares p against
	 * __virt_to_phys(__bss_stop) on BOTH bounds (p >= X && p < X),
	 * which can never be true, so the !(...) term is always satisfied
	 * and any RAM address in [memory_start, high_memory) is rejected.
	 * The intended exclusion window should be confirmed and the upper
	 * bound fixed.
	 */
	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
		p < __virt_to_phys((phys_addr_t)__bss_stop))) {
		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %ps\n",
			(unsigned long)p, __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */
	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		/* Early boot: hand out space from the top-down bump pointer. */
		v = (ioremap_bot -= size);
	}

	/* Callers that pass no protection bits get full kernel access. */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		/* NOTE(review): pages already mapped in the loop above are
		 * not torn down here; the early-boot bump pointer is not
		 * restored either — confirm whether that leak is acceptable. */
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
|
|
|
|
|
|
|
|
/*
 * Map @size bytes of I/O space at physical @addr as uncached
 * kernel-virtual memory.  Thin wrapper around __ioremap().
 */
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	void __iomem *va;

	va = __ioremap(addr, size, _PAGE_NO_CACHE);
	return va;
}
EXPORT_SYMBOL(ioremap);
|
|
|
|
|
2018-01-02 19:47:20 +08:00
|
|
|
/*
 * Release a mapping created by ioremap().  Addresses that were handed
 * out before the vmalloc system was up (at or below high_memory, or at
 * or above ioremap_bot) are left in place.
 */
void iounmap(volatile void __iomem *addr)
{
	unsigned long va = (unsigned long) addr;

	if ((__force void *)addr <= high_memory || va >= ioremap_bot)
		return;

	vfree((void *) (va & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Install a single kernel page-table entry mapping virtual address @va
 * to physical address @pa with PTE bits @flags.
 *
 * Returns 0 on success, or -ENOMEM when the PTE page could not be
 * allocated.
 */
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	/* (p4d/pud levels are folded on this two-level layout). */
	p4d = p4d_offset(pgd_offset_k(va), va);
	pud = pud_offset(p4d, va);
	pd = pmd_offset(pud, va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
	/* pg = pte_alloc_kernel(&init_mm, pd, va); */

	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		/* Presumably invalidates any stale TLB entry for @va; only
		 * needed once the MMU-managed kernel is live — TODO confirm
		 * _tlbie semantics against the arch TLB code. */
		if (unlikely(mem_init_done))
			_tlbie(va);
	}
	return err;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Map in all of physical memory starting at CONFIG_KERNEL_START.
|
|
|
|
*/
|
|
|
|
/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 * Kernel text pages are mapped read-only (via the no-user-access
 * trick below); everything else is writable.
 */
void __init mapin_ram(void)
{
	unsigned long virt = CONFIG_KERNEL_START;
	unsigned long phys = memory_start;
	unsigned long done;

	for (done = 0; done < lowmem_size; done += PAGE_SIZE) {
		unsigned long pte_flags = _PAGE_PRESENT | _PAGE_ACCESSED |
					  _PAGE_SHARED | _PAGE_HWEXEC;

		if ((char *) virt >= _stext && (char *) virt < _etext)
			/* On the MicroBlaze, no user access
			   forces R/W kernel access */
			pte_flags |= _PAGE_USER;
		else
			pte_flags |= _PAGE_WRENABLE;

		map_page(virt, phys, pte_flags);
		virt += PAGE_SIZE;
		phys += PAGE_SIZE;
	}
}
|
|
|
|
|
|
|
|
/* is x a power of 2? */
/* NOTE(review): the kernel provides is_power_of_2() in <linux/log2.h>;
 * this local macro duplicates it and appears unused in this file —
 * consider removing it. */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
|
|
|
|
|
|
|
|
/* Scan the real Linux page tables and return a PTE pointer for
|
|
|
|
* a virtual address in a context.
|
|
|
|
* Returns true (1) if PTE was found, zero otherwise. The pointer to
|
|
|
|
* the PTE pointer is unmodified if PTE is not found.
|
|
|
|
*/
|
|
|
|
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
|
|
|
|
{
|
|
|
|
pgd_t *pgd;
|
2019-12-05 08:54:03 +08:00
|
|
|
p4d_t *p4d;
|
|
|
|
pud_t *pud;
|
2009-05-26 22:30:15 +08:00
|
|
|
pmd_t *pmd;
|
|
|
|
pte_t *pte;
|
|
|
|
int retval = 0;
|
|
|
|
|
|
|
|
pgd = pgd_offset(mm, addr & PAGE_MASK);
|
|
|
|
if (pgd) {
|
2019-12-05 08:54:03 +08:00
|
|
|
p4d = p4d_offset(pgd, addr & PAGE_MASK);
|
|
|
|
pud = pud_offset(p4d, addr & PAGE_MASK);
|
|
|
|
pmd = pmd_offset(pud, addr & PAGE_MASK);
|
2009-05-26 22:30:15 +08:00
|
|
|
if (pmd_present(*pmd)) {
|
|
|
|
pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
|
|
|
|
if (pte) {
|
|
|
|
retval = 1;
|
|
|
|
*ptep = pte;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find physical address for this virtual address. Normally used by
|
|
|
|
* I/O functions, but anyone can call it.
|
|
|
|
*/
|
|
|
|
unsigned long iopa(unsigned long addr)
|
|
|
|
{
|
|
|
|
unsigned long pa;
|
|
|
|
|
|
|
|
pte_t *pte;
|
|
|
|
struct mm_struct *mm;
|
|
|
|
|
|
|
|
/* Allow mapping of user addresses (within the thread)
|
|
|
|
* for DMA if necessary.
|
|
|
|
*/
|
|
|
|
if (addr < TASK_SIZE)
|
|
|
|
mm = current->mm;
|
|
|
|
else
|
|
|
|
mm = &init_mm;
|
|
|
|
|
|
|
|
pa = 0;
|
|
|
|
if (get_pteptr(mm, addr, &pte))
|
|
|
|
pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
|
|
|
|
|
|
|
|
return pa;
|
|
|
|
}
|
2010-04-13 14:59:37 +08:00
|
|
|
|
mm: treewide: remove unused address argument from pte_alloc functions
Patch series "Add support for fast mremap".
This series speeds up the mremap(2) syscall by copying page tables at
the PMD level even for non-THP systems. There is concern that the extra
'address' argument that mremap passes to pte_alloc may do something
subtle architecture related in the future that may make the scheme not
work. Also we find that there is no point in passing the 'address' to
pte_alloc since its unused. This patch therefore removes this argument
tree-wide resulting in a nice negative diff as well. Also ensuring
along the way that the enabled architectures do not do anything funky
with the 'address' argument that goes unnoticed by the optimization.
Build and boot tested on x86-64. Build tested on arm64. The config
enablement patch for arm64 will be posted in the future after more
testing.
The changes were obtained by applying the following Coccinelle script.
(thanks Julia for answering all Coccinelle questions!).
Following fix ups were done manually:
* Removal of address argument from pte_fragment_alloc
* Removal of pte_alloc_one_fast definitions from m68k and microblaze.
// Options: --include-headers --no-includes
// Note: I split the 'identifier fn' line, so if you are manually
// running it, please unsplit it so it runs for you.
virtual patch
@pte_alloc_func_def depends on patch exists@
identifier E2;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
type T2;
@@
fn(...
- , T2 E2
)
{ ... }
@pte_alloc_func_proto_noarg depends on patch exists@
type T1, T2, T3, T4;
identifier fn =~ "^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@
(
- T3 fn(T1, T2);
+ T3 fn(T1);
|
- T3 fn(T1, T2, T4);
+ T3 fn(T1, T2);
)
@pte_alloc_func_proto depends on patch exists@
identifier E1, E2, E4;
type T1, T2, T3, T4;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@
(
- T3 fn(T1 E1, T2 E2);
+ T3 fn(T1 E1);
|
- T3 fn(T1 E1, T2 E2, T4 E4);
+ T3 fn(T1 E1, T2 E2);
)
@pte_alloc_func_call depends on patch exists@
expression E2;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@
fn(...
-, E2
)
@pte_alloc_macro depends on patch exists@
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
identifier a, b, c;
expression e;
position p;
@@
(
- #define fn(a, b, c) e
+ #define fn(a, b) e
|
- #define fn(a, b) e
+ #define fn(a) e
)
Link: http://lkml.kernel.org/r/20181108181201.88826-2-joelaf@google.com
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Suggested-by: Kirill A. Shutemov <kirill@shutemov.name>
Acked-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Julia Lawall <Julia.Lawall@lip6.fr>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2019-01-04 07:28:34 +08:00
|
|
|
/*
 * Allocate one zeroed page for kernel PTEs.  Uses the early boot
 * allocator until mem_init() has run, the page allocator afterwards.
 * Returns NULL on allocation failure.
 */
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *new;

	if (!mem_init_done) {
		/* Early boot: pull a page from the boot allocator and
		 * zero it by hand. */
		new = (pte_t *)early_get_page();
		if (new)
			clear_page(new);
		return new;
	}

	/* Normal path: __GFP_ZERO gives us a cleared page directly. */
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
|
2011-12-15 21:33:32 +08:00
|
|
|
|
|
|
|
/*
 * Install a fixmap entry: map the fixed virtual slot @idx to physical
 * address @phys with protection @flags.  BUGs on an out-of-range index.
 */
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long va;

	if (idx >= __end_of_fixed_addresses)
		BUG();

	va = __fix_to_virt(idx);
	map_page(va, phys, pgprot_val(flags));
}
|