arm: Remove HYP/Stage-2 page-table support

Remove all traces of Stage-2 and HYP page table support.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Vladimir Murzin <vladimir.murzin@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Author: Marc Zyngier <maz@kernel.org>
Date:   2020-01-24 23:11:46 +00:00
Commit: 3fbb96c054 (parent: 541ad0150c)

8 files changed, 1 insertion(+), 93 deletions(-)

@@ -104,26 +104,6 @@
  */
 #define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */
-/*
- * 2nd stage PTE definitions for LPAE.
- */
-#define L_PTE_S2_MT_UNCACHED		(_AT(pteval_t, 0x0) << 2) /* strongly ordered */
-#define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
-#define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
-#define L_PTE_S2_MT_DEV_SHARED		(_AT(pteval_t, 0x1) << 2) /* device */
-#define L_PTE_S2_MT_MASK		(_AT(pteval_t, 0xf) << 2)
-#define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)   /* HAP[1]   */
-#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
-#define L_PMD_S2_RDONLY			(_AT(pmdval_t, 1) << 6)   /* HAP[1]   */
-#define L_PMD_S2_RDWR			(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
-/*
- * Hyp-mode PL2 PTE definitions for LPAE.
- */
-#define L_PTE_HYP		L_PTE_USER
 #ifndef __ASSEMBLY__
 #define pud_none(pud)		(!pud_val(pud))

@@ -80,9 +80,6 @@ extern void __pgd_error(const char *file, int line, pgd_t);
 extern pgprot_t		pgprot_user;
 extern pgprot_t		pgprot_kernel;
-extern pgprot_t		pgprot_hyp_device;
-extern pgprot_t		pgprot_s2;
-extern pgprot_t		pgprot_s2_device;
 #define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
@@ -95,12 +92,6 @@ extern pgprot_t pgprot_s2_device;
 #define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
 #define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
 #define PAGE_KERNEL_EXEC	pgprot_kernel
-#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
-#define PAGE_HYP_EXEC		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
-#define PAGE_HYP_RO		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
-#define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
-#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY | L_PTE_XN)
-#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY | L_PTE_XN)
 #define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)

@@ -10,8 +10,6 @@ extern char __idmap_text_start[];
 extern char __idmap_text_end[];
 extern char __entry_text_start[];
 extern char __entry_text_end[];
-extern char __hyp_idmap_text_start[];
-extern char __hyp_idmap_text_end[];
 static inline bool in_entry_text(unsigned long addr)
 {
@@ -22,9 +20,7 @@ static inline bool in_entry_text(unsigned long addr)
 static inline bool in_idmap_text(unsigned long addr)
 {
 	void *a = (void *)addr;
-	return memory_contains(__idmap_text_start, __idmap_text_end, a, 1) ||
-	       memory_contains(__hyp_idmap_text_start, __hyp_idmap_text_end,
-			       a, 1);
+	return memory_contains(__idmap_text_start, __idmap_text_end, a, 1);
 }
 #endif /* _ASM_ARM_SECTIONS_H */

@@ -67,13 +67,6 @@ static inline bool is_kernel_in_hyp_mode(void)
 	return false;
 }
-/* The section containing the hypervisor idmap text */
-extern char __hyp_idmap_text_start[];
-extern char __hyp_idmap_text_end[];
-/* The section containing the hypervisor text */
-extern char __hyp_text_start[];
-extern char __hyp_text_end[];
 #endif
 #else

@@ -162,14 +162,6 @@ SECTIONS
 ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
 ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
-/*
- * The HYP init code can't be more than a page long,
- * and should not cross a page boundary.
- * The above comment applies as well.
- */
-ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
-	"HYP init code too big or misaligned")
 #ifdef CONFIG_XIP_DEFLATED_DATA
 /*
  * The .bss is used as a stack area for __inflate_kernel_data() whose stack

@@ -170,12 +170,4 @@ __start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
 ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
 ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
-/*
- * The HYP init code can't be more than a page long,
- * and should not cross a page boundary.
- * The above comment applies as well.
- */
-ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
-	"HYP init code too big or misaligned")
 #endif /* CONFIG_XIP_KERNEL */

@@ -31,20 +31,11 @@
 		*(.proc.info.init)					\
 		__proc_info_end = .;
-#define HYPERVISOR_TEXT							\
-		__hyp_text_start = .;					\
-		*(.hyp.text)						\
-		__hyp_text_end = .;
 #define IDMAP_TEXT							\
 		ALIGN_FUNCTION();					\
 		__idmap_text_start = .;					\
 		*(.idmap.text)						\
 		__idmap_text_end = .;					\
-		. = ALIGN(PAGE_SIZE);					\
-		__hyp_idmap_text_start = .;				\
-		*(.hyp.idmap.text)					\
-		__hyp_idmap_text_end = .;
 #define ARM_DISCARD							\
 		*(.ARM.exidx.exit.text)					\
@@ -72,7 +63,6 @@
 		SCHED_TEXT						\
 		CPUIDLE_TEXT						\
 		LOCK_TEXT						\
-		HYPERVISOR_TEXT						\
 		KPROBES_TEXT						\
 		*(.gnu.warning)						\
 		*(.glue_7)						\

@@ -63,9 +63,6 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
 static unsigned int ecc_mask __initdata = 0;
 pgprot_t pgprot_user;
 pgprot_t pgprot_kernel;
-pgprot_t pgprot_hyp_device;
-pgprot_t pgprot_s2;
-pgprot_t pgprot_s2_device;
 EXPORT_SYMBOL(pgprot_user);
 EXPORT_SYMBOL(pgprot_kernel);
@@ -75,15 +72,8 @@ struct cachepolicy {
 	unsigned int	cr_mask;
 	pmdval_t	pmd;
 	pteval_t	pte;
-	pteval_t	pte_s2;
 };
-#ifdef CONFIG_ARM_LPAE
-#define s2_policy(policy)	policy
-#else
-#define s2_policy(policy)	0
-#endif
 unsigned long kimage_voffset __ro_after_init;
 static struct cachepolicy cache_policies[] __initdata = {
@@ -92,31 +82,26 @@ static struct cachepolicy cache_policies[] __initdata = {
 		.cr_mask	= CR_W|CR_C,
 		.pmd		= PMD_SECT_UNCACHED,
 		.pte		= L_PTE_MT_UNCACHED,
-		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "buffered",
 		.cr_mask	= CR_C,
 		.pmd		= PMD_SECT_BUFFERED,
 		.pte		= L_PTE_MT_BUFFERABLE,
-		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "writethrough",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WT,
 		.pte		= L_PTE_MT_WRITETHROUGH,
-		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),
 	}, {
 		.policy		= "writeback",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WB,
 		.pte		= L_PTE_MT_WRITEBACK,
-		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}, {
 		.policy		= "writealloc",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WBWA,
 		.pte		= L_PTE_MT_WRITEALLOC,
-		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}
 };
@@ -246,9 +231,6 @@ static struct mem_type mem_types[] __ro_after_init = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 				  L_PTE_SHARED,
-		.prot_pte_s2	= s2_policy(PROT_PTE_S2_DEVICE) |
-				  s2_policy(L_PTE_S2_MT_DEV_SHARED) |
-				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 		.domain		= DOMAIN_IO,
@@ -434,7 +416,6 @@ static void __init build_mem_type_table(void)
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
 	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
-	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
@@ -558,9 +539,6 @@ static void __init build_mem_type_table(void)
 	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
-	s2_pgprot = cp->pte_s2;
-	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
-	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
 #ifndef CONFIG_ARM_LPAE
 	/*
@@ -604,7 +582,6 @@ static void __init build_mem_type_table(void)
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
-		s2_pgprot |= L_PTE_SHARED;
 		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
 		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -666,9 +643,6 @@ static void __init build_mem_type_table(void)
 	pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 				 L_PTE_DIRTY | kern_pgprot);
-	pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
-	pgprot_s2_device = __pgprot(s2_device_pgprot);
-	pgprot_hyp_device = __pgprot(hyp_device_pgprot);
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;