/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"
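
/*
 * TCR.T0SZ value used for the identity mapping. The idmap VA range is
 * extended at early boot if system RAM lies outside the range covered by
 * VA_BITS.
 */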
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!pfn_valid(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
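
/*
 * Allocate and zero a naturally aligned block from memblock, for page
 * tables created before the core page allocator is available.
 */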
static void __init *early_alloc(unsigned long sz)
{
        phys_addr_t phys;
        void *ptr;

        phys = memblock_alloc(sz, sz);
        BUG_ON(!phys);
        ptr = __va(phys);
        memset(ptr, 0, sz);
        return ptr;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
        unsigned long pfn = pmd_pfn(*pmd);
        int i = 0;

        do {
                /*
                 * Need to have the least restrictive permissions available
                 * here; the permissions will be fixed up later.
                 */
                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                pfn++;
        } while (pte++, i++, i < PTRS_PER_PTE);
}
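
/*
 * Populate the PTE level for [addr, end). If the PMD is empty or holds a
 * section mapping, a PTE table is allocated first and any existing section
 * is split into pages.
 */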
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
                           unsigned long end, unsigned long pfn,
                           pgprot_t prot,
                           void *(*alloc)(unsigned long size))
{
        pte_t *pte;

        if (pmd_none(*pmd) || pmd_sect(*pmd)) {
                pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
                if (pmd_sect(*pmd))
                        split_pmd(pmd, pte);
                __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
                flush_tlb_all();
        }
        BUG_ON(pmd_bad(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        do {
                set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}
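
/*
 * Remap a 1GB block at the PUD level as a table of PMD sections with the
 * same attributes.
 */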
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
        unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
        pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
        int i = 0;

        do {
                set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
                addr += PMD_SIZE;
        } while (pmd++, i++, i < PTRS_PER_PMD);
}
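
/*
 * Populate the PMD level for [addr, end), using section mappings where the
 * addresses and size allow and PTE tables otherwise.
 */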
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                           unsigned long addr, unsigned long end,
                           phys_addr_t phys, pgprot_t prot,
                           void *(*alloc)(unsigned long size))
{
        pmd_t *pmd;
        unsigned long next;

        /*
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        if (pud_none(*pud) || pud_sect(*pud)) {
                pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
                if (pud_sect(*pud)) {
                        /*
                         * Need the existing 1GB of mappings to remain
                         * present.
                         */
                        split_pud(pud, pmd);
                }
                pud_populate(mm, pud, pmd);
                flush_tlb_all();
        }
        BUG_ON(pud_bad(*pud));

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0) {
                        pmd_t old_pmd = *pmd;
                        set_pmd(pmd, __pmd(phys |
                                           pgprot_val(mk_sect_prot(prot))));
                        /*
                         * Check for previous table entries created during
                         * boot (__create_page_tables) and flush them.
                         */
                        if (!pmd_none(old_pmd)) {
                                flush_tlb_all();
                                if (pmd_table(old_pmd)) {
                                        phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
                                       prot, alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
}
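
/*
 * A 1GB block mapping can only be used with the 4K granule, and only when
 * the virtual and physical addresses and the remaining size are all
 * PUD-aligned.
 */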
static inline bool use_1G_block(unsigned long addr, unsigned long next,
                                unsigned long phys)
{
        if (PAGE_SHIFT != 12)
                return false;

        if (((addr | next | phys) & ~PUD_MASK) != 0)
                return false;

        return true;
}
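
/*
 * Populate the PUD level for [addr, end), using 1GB block mappings where
 * possible and descending to the PMD level otherwise.
 */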
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
                           unsigned long addr, unsigned long end,
                           phys_addr_t phys, pgprot_t prot,
                           void *(*alloc)(unsigned long size))
{
        pud_t *pud;
        unsigned long next;

        if (pgd_none(*pgd)) {
                pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
                pgd_populate(mm, pgd, pud);
        }
        BUG_ON(pgd_bad(*pgd));

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);

                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
                if (use_1G_block(addr, next, phys)) {
                        pud_t old_pud = *pud;
                        set_pud(pud, __pud(phys |
                                           pgprot_val(mk_sect_prot(prot))));

                        /*
                         * If we have an old value for a pud, it will
                         * be pointing to a pmd table that we no longer
                         * need (from swapper_pg_dir).
                         *
                         * Look up the old pmd table and free it.
                         */
                        if (!pud_none(old_pud)) {
                                flush_tlb_all();
                                if (pud_table(old_pud)) {
                                        phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * given virtual/physical address range.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
                             phys_addr_t phys, unsigned long virt,
                             phys_addr_t size, pgprot_t prot,
                             void *(*alloc)(unsigned long size))
{
        unsigned long addr, length, end, next;

        /*
         * If the virtual and physical addresses don't have the same offset
         * within a page, we cannot map the region as the caller expects.
         */
        if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
                return;

        phys &= PAGE_MASK;
        addr = virt & PAGE_MASK;
        length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
                alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
}
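
/*
 * Page table allocator used once the core page allocator is available; only
 * single-page allocations are expected.
 */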
static void *late_alloc(unsigned long size)
{
        void *ptr;

        BUG_ON(size > PAGE_SIZE);
        ptr = (void *)__get_free_page(PGALLOC_GFP);
        BUG_ON(!ptr);
        return ptr;
}
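
/*
 * Create an early mapping in the kernel page tables, allocating any
 * intermediate tables from memblock.
 */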
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_mapping(&init_mm, pgd_offset_k(virt), phys, virt,
                         size, prot, early_alloc);
}
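
/*
 * Create a mapping in a caller-provided mm, allocating any intermediate
 * tables with the late (page) allocator.
 */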
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot)
{
        __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
                         late_alloc);
}
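
/*
 * Create or update a kernel mapping after paging_init(); used here to change
 * the permissions of existing mappings.
 */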
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
                                phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }

        return __create_mapping(&init_mm, pgd_offset_k(virt),
                                phys, virt, size, prot, late_alloc);
}
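
/*
 * Map a memblock region into the linear mapping. With DEBUG_RODATA, the
 * region covering the kernel text is mapped executable and everything else
 * non-executable, so that permissions can be tightened later.
 */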
#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        /*
         * Set up the executable regions using the existing section mappings
         * for now. This will get more fine grained later once all memory
         * is mapped.
         */
        unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
        unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

        if (end < kernel_x_start) {
                create_mapping(start, __phys_to_virt(start),
                               end - start, PAGE_KERNEL);
        } else if (start >= kernel_x_end) {
                create_mapping(start, __phys_to_virt(start),
                               end - start, PAGE_KERNEL);
        } else {
                if (start < kernel_x_start)
                        create_mapping(start, __phys_to_virt(start),
                                       kernel_x_start - start,
                                       PAGE_KERNEL);
                create_mapping(kernel_x_start,
                               __phys_to_virt(kernel_x_start),
                               kernel_x_end - kernel_x_start,
                               PAGE_KERNEL_EXEC);
                if (kernel_x_end < end)
                        create_mapping(kernel_x_end,
                                       __phys_to_virt(kernel_x_end),
                                       end - kernel_x_end,
                                       PAGE_KERNEL);
        }
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        create_mapping(start, __phys_to_virt(start), end - start,
                       PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
        struct memblock_region *reg;
        phys_addr_t limit;

        /*
         * Temporarily limit the memblock range. We need to do this as
         * create_mapping requires puds, pmds and ptes to be allocated from
         * memory addressable from the initial direct kernel mapping.
         *
         * The initial direct kernel mapping, located at swapper_pg_dir, gives
         * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps)
         * of memory starting from PHYS_OFFSET (which must be aligned to 2MB
         * as per Documentation/arm64/booting.txt).
         */
        limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
        memblock_set_current_limit(limit);

        /* map all the memory banks */
        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = start + reg->size;

                if (start >= end)
                        break;
                if (memblock_is_nomap(reg))
                        continue;

                if (ARM64_SWAPPER_USES_SECTION_MAPS) {
                        /*
                         * For the first memory bank, align the start address
                         * and the current memblock limit to prevent
                         * create_mapping() from allocating pte page tables
                         * from unmapped memory. With section maps, if the
                         * first block doesn't end on a section size boundary,
                         * create_mapping() will try to allocate a pte page,
                         * which may be returned from an unmapped area.
                         * When section maps are not used, the pte page table
                         * for the current limit is already present in
                         * swapper_pg_dir.
                         */
                        if (start < limit)
                                start = ALIGN(start, SECTION_SIZE);
                        if (end < limit) {
                                limit = end & SECTION_MASK;
                                memblock_set_current_limit(limit);
                        }
                }
                __map_memblock(start, end);
        }

        /* Limit no longer required. */
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
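
/*
 * With DEBUG_RODATA, the edges of the executable region may not be aligned
 * to SWAPPER_BLOCK_SIZE; remap the leading and trailing fragments with
 * non-executable permissions now that all of memory is mapped.
 */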
static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
        /* now that we are actually fully mapped, make the start/end more fine grained */
        if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_start = round_down(__pa(_stext),
                                                         SWAPPER_BLOCK_SIZE);

                create_mapping(aligned_start, __phys_to_virt(aligned_start),
                               __pa(_stext) - aligned_start,
                               PAGE_KERNEL);
        }

        if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
                unsigned long aligned_end = round_up(__pa(__init_end),
                                                     SWAPPER_BLOCK_SIZE);
                create_mapping(__pa(__init_end), (unsigned long)__init_end,
                               aligned_end - __pa(__init_end),
                               PAGE_KERNEL);
        }
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
        create_mapping_late(__pa(_stext), (unsigned long)_stext,
                            (unsigned long)_etext - (unsigned long)_stext,
                            PAGE_KERNEL_ROX);
}
#endif
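
/*
 * Remap the init sections as non-executable once they are no longer needed.
 */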
void fixup_init(void)
{
        create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
                            (unsigned long)__init_end - (unsigned long)__init_begin,
                            PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
        void *zero_page;

        map_mem();
        fixup_executable();

        /* allocate the zero page. */
        zero_page = early_alloc(PAGE_SIZE);

        bootmem_init();

        empty_zero_page = virt_to_page(zero_page);

        /* Ensure the zero page is visible to the page table walker */
        dsb(ishst);

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to the zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_sect(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_sect(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        return vmemmap_populate_basepages(start, end, node);
}
#else   /* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        do {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p = NULL;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
                } else
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
        } while (addr = next, addr != end);

        return 0;
}
#endif  /* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif  /* CONFIG_SPARSEMEM_VMEMMAP */
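
/*
 * Statically allocated page tables used to back the fixmap; only the levels
 * required by the configured number of translation levels are defined.
 */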
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t *fixmap_pud(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);

        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

        return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
        pud_t *pud = fixmap_pud(addr);

        BUG_ON(pud_none(*pud) || pud_bad(*pud));

        return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
        pmd_t *pmd = fixmap_pmd(addr);

        BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

        return pte_offset_kernel(pmd, addr);
}
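
/*
 * Hook the statically allocated fixmap tables into the kernel page tables so
 * that __set_fixmap() can be used before the page table allocators are up.
 */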
void __init early_fixmap_init(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr = FIXADDR_START;

        pgd = pgd_offset_k(addr);
        pgd_populate(&init_mm, pgd, bm_pud);
        pud = pud_offset(pgd, addr);
        pud_populate(&init_mm, pud, bm_pmd);
        pmd = pmd_offset(pud, addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
             || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmd %p != %p, %p\n",
                        pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
        }
}
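
/*
 * Install (or, with an empty pgprot, clear) the fixmap entry for @idx.
 */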
void __set_fixmap(enum fixed_addresses idx,
                  phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        pte = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, pte);
                flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
        }
}
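
/*
 * Map the FDT through the fixmap so that it can be parsed before the linear
 * mapping exists. Returns the virtual address of the FDT, or NULL if the
 * blob is missing, misaligned or larger than MAX_FDT_SIZE.
 */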
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        pgprot_t prot = PAGE_KERNEL_RO;
        int size, offset;
        void *dt_virt;

        /*
         * Check whether the physical FDT address is set and meets the minimum
         * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
         * at least 8 bytes so that we can always access the size field of the
         * FDT header after mapping the first chunk, double check here if that
         * is indeed the case.
         */
        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
                return NULL;

        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
         * to call create_mapping() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
         * On 4k pages, we'll use section mappings for the FDT so we only
         * have to be in the same PUD.
         */
        BUILD_BUG_ON(dt_virt_base % SZ_2M);

        BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
                     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

        offset = dt_phys % SWAPPER_BLOCK_SIZE;
        dt_virt = (void *)dt_virt_base + offset;

        /* map the first chunk so we can read the size from the header */
        create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                       SWAPPER_BLOCK_SIZE, prot);

        if (fdt_check_header(dt_virt) != 0)
                return NULL;

        size = fdt_totalsize(dt_virt);
        if (size > MAX_FDT_SIZE)
                return NULL;

        if (offset + size > SWAPPER_BLOCK_SIZE)
                create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                               round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

        memblock_reserve(dt_phys, size);

        return dt_virt;
}