diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt
index 38dc06d0a791..4178ebda6e66 100644
--- a/Documentation/arm/memory.txt
+++ b/Documentation/arm/memory.txt
@@ -41,7 +41,7 @@ fffe8000	fffeffff	DTCM mapping area for platforms with
 fffe0000	fffe7fff	ITCM mapping area for platforms with
 				ITCM mounted inside the CPU.
 
-ffc00000	ffdfffff	Fixmap mapping region.  Addresses provided
+ffc00000	ffefffff	Fixmap mapping region.  Addresses provided
 				by fix_to_virt() will be located here.
 
 fee00000	feffffff	Mapping of PCI I/O space. This is a static
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 10e78d00a0bb..2d46862e7bef 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -487,6 +487,16 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
 			     void *kaddr, unsigned long len);
+
 #endif
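A usage sketch (mine, not part of the series) of how the helpers declared in cacheflush.h above are intended to be called; example_modify_text is a hypothetical stop_machine()-style callback, mirroring the ftrace change later in this series. Because the !CONFIG_DEBUG_RODATA stubs are empty inlines, call sites need no #ifdefs:

static int example_modify_text(void *data)
{
	set_kernel_text_rw();	/* temporarily make kernel text writable */
	/* ... patch instructions in place here ... */
	set_kernel_text_ro();	/* restore read-only text before resuming */
	return 0;
}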
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 74124b0d0d79..0415eae1df27 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -2,27 +2,24 @@
 #define _ASM_FIXMAP_H
 
 #define FIXADDR_START		0xffc00000UL
-#define FIXADDR_TOP		0xffe00000UL
-#define FIXADDR_SIZE		(FIXADDR_TOP - FIXADDR_START)
+#define FIXADDR_END		0xfff00000UL
+#define FIXADDR_TOP		(FIXADDR_END - PAGE_SIZE)
 
-#define FIX_KMAP_NR_PTES	(FIXADDR_SIZE >> PAGE_SHIFT)
+#include <asm/kmap_types.h>
 
-#define __fix_to_virt(x)	(FIXADDR_START + ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x)	(((x) - FIXADDR_START) >> PAGE_SHIFT)
+enum fixed_addresses {
+	FIX_KMAP_BEGIN,
+	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
 
-extern void __this_fixmap_does_not_exist(void);
+	/* Support writing RO kernel text via kprobes, jump labels, etc. */
+	FIX_TEXT_POKE0,
+	FIX_TEXT_POKE1,
 
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
-	if (idx >= FIX_KMAP_NR_PTES)
-		__this_fixmap_does_not_exist();
-	return __fix_to_virt(idx);
-}
+	__end_of_fixed_addresses
+};
 
-static inline unsigned int virt_to_fix(const unsigned long vaddr)
-{
-	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
-	return __virt_to_fix(vaddr);
-}
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+
+#include <asm-generic/fixmap.h>
 
 #endif
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 64886387c0bf..2ac3920e1793 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -68,7 +68,7 @@ test-kprobes-objs		+= kprobes-test-arm.o
 endif
 obj-$(CONFIG_OABI_COMPAT)	+= sys_oabi-compat.o
 obj-$(CONFIG_ARM_THUMBEE)	+= thumbee.o
-obj-$(CONFIG_KGDB)		+= kgdb.o
+obj-$(CONFIG_KGDB)		+= kgdb.o patch.o
 obj-$(CONFIG_ARM_UNWIND)	+= unwind.o
 obj-$(CONFIG_HAVE_TCM)		+= tcm.o
 obj-$(CONFIG_OF)		+= devtree.o
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index af9a8a927a4e..b8c75e45a950 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -15,6 +15,7 @@
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cacheflush.h>
 #include <asm/opcodes.h>
@@ -35,6 +36,22 @@
 
 #define	OLD_NOP		0xe1a00000	/* mov r0, r0 */
 
+static int __ftrace_modify_code(void *data)
+{
+	int *command = data;
+
+	set_kernel_text_rw();
+	ftrace_modify_all_code(*command);
+	set_kernel_text_ro();
+
+	return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+	stop_machine(__ftrace_modify_code, &command, NULL);
+}
+
 static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
 {
 	return rec->arch.old_mcount ? OLD_NOP : NOP;
@@ -73,6 +90,8 @@ int ftrace_arch_code_modify_prepare(void)
 int ftrace_arch_code_modify_post_process(void)
 {
 	set_all_modules_text_ro();
+	/* Make sure any TLB misses during machine stop are cleared. */
+	flush_tlb_all();
 	return 0;
 }
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
index 4ce4f789446d..afeeb9ea6f43 100644
--- a/arch/arm/kernel/jump_label.c
+++ b/arch/arm/kernel/jump_label.c
@@ -19,7 +19,7 @@ static void __arch_jump_label_transform(struct jump_entry *entry,
 		insn = arm_gen_nop();
 
 	if (is_static)
-		__patch_text(addr, insn);
+		__patch_text_early(addr, insn);
 	else
 		patch_text(addr, insn);
 }
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a74b53c1b7df..07db2f8a1b45 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -12,8 +12,12 @@
 #include <linux/irq.h>
 #include <linux/kdebug.h>
 #include <linux/kgdb.h>
+#include <linux/uaccess.h>
+
 #include <asm/traps.h>
 
+#include "patch.h"
+
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
 {
 	{ "r0", 4, offsetof(struct pt_regs, ARM_r0)},
@@ -244,6 +248,31 @@ void kgdb_arch_exit(void)
 	unregister_die_notifier(&kgdb_notifier);
 }
 
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+	int err;
+
+	/* patch_text() only supports int-sized breakpoints */
+	BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE);
+
+	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+				BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+
+	patch_text((void *)bpt->bpt_addr,
+		   *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+
+	return err;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+	patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+
+	return 0;
+}
+
 /*
  * Register our undef instruction hooks with ARM undef core.
  * We register a hook specifically looking for the KGDB break inst
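A sanity-check sketch of the new fixmap layout (hypothetical helper, not in the series). With the asm-generic header pulled in above, __fix_to_virt(x) == FIXADDR_TOP - (x << PAGE_SHIFT), so slot 0 sits at FIXADDR_TOP (0xffeff000 with 4K pages) and higher indices grow downward through the 3MB window toward FIXADDR_START:

static void __init example_check_fixmap_layout(void)
{
	/* Slot 0 (FIX_KMAP_BEGIN) occupies the highest fixmap page. */
	BUILD_BUG_ON(__fix_to_virt(FIX_KMAP_BEGIN) != FIXADDR_TOP);
	/* The last slot must stay inside [FIXADDR_START, FIXADDR_END). */
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses - 1) <
		     FIXADDR_START);
}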
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4f75192f2ef9..de2b085ad753 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -29,6 +29,7 @@ extern unsigned long kexec_boot_atags;
 
 static atomic_t waiting_for_crash_ipi;
 
+static unsigned long dt_mem;
 /*
  * Provide a dummy crash_notes definition while crash dump arrives to arm.
  * This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
@@ -64,7 +65,7 @@ int machine_kexec_prepare(struct kimage *image)
 			return err;
 
 		if (be32_to_cpu(header) == OF_DT_HEADER)
-			kexec_boot_atags = current_segment->mem;
+			dt_mem = current_segment->mem;
 	}
 	return 0;
 }
@@ -163,12 +164,12 @@ void machine_kexec(struct kimage *image)
 	reboot_code_buffer = page_address(image->control_code_page);
 
 	/* Prepare parameters for reboot_code_buffer */
+	set_kernel_text_rw();
 	kexec_start_address = image->start;
 	kexec_indirection_page = page_list;
 	kexec_mach_type = machine_arch_type;
-	if (!kexec_boot_atags)
-		kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
-
+	kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET
+				     + KEXEC_ARM_ATAGS_OFFSET;
 
 	/* copy our kernel relocation code to the control code page */
 	reboot_entry = fncpy(reboot_code_buffer,
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 07314af47733..5038960e3c55 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -1,8 +1,11 @@
 #include <linux/kernel.h>
+#include <linux/spinlock.h>
 #include <linux/kprobes.h>
+#include <linux/mm.h>
 #include <linux/stop_machine.h>
 
 #include <asm/cacheflush.h>
+#include <asm/fixmap.h>
 #include <asm/smp_plat.h>
 #include <asm/opcodes.h>
@@ -13,21 +16,77 @@ struct patch {
 	unsigned int insn;
 };
 
-void __kprobes __patch_text(void *addr, unsigned int insn)
+static DEFINE_SPINLOCK(patch_lock);
+
+static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+	__acquires(&patch_lock)
+{
+	unsigned int uintaddr = (uintptr_t) addr;
+	bool module = !core_kernel_text(uintaddr);
+	struct page *page;
+
+	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
+		page = vmalloc_to_page(addr);
+	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
+		page = virt_to_page(addr);
+	else
+		return addr;
+
+	if (flags)
+		spin_lock_irqsave(&patch_lock, *flags);
+	else
+		__acquire(&patch_lock);
+
+	set_fixmap(fixmap, page_to_phys(page));
+
+	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
+	__releases(&patch_lock)
+{
+	clear_fixmap(fixmap);
+
+	if (flags)
+		spin_unlock_irqrestore(&patch_lock, *flags);
+	else
+		__release(&patch_lock);
+}
+
+void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
 {
 	bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
+	unsigned int uintaddr = (uintptr_t) addr;
+	bool twopage = false;
+	unsigned long flags;
+	void *waddr = addr;
 	int size;
 
+	if (remap)
+		waddr = patch_map(addr, FIX_TEXT_POKE0, &flags);
+	else
+		__acquire(&patch_lock);
+
 	if (thumb2 && __opcode_is_thumb16(insn)) {
-		*(u16 *)addr = __opcode_to_mem_thumb16(insn);
+		*(u16 *)waddr = __opcode_to_mem_thumb16(insn);
 		size = sizeof(u16);
-	} else if (thumb2 && ((uintptr_t)addr & 2)) {
+	} else if (thumb2 && (uintaddr & 2)) {
 		u16 first = __opcode_thumb32_first(insn);
 		u16 second = __opcode_thumb32_second(insn);
-		u16 *addrh = addr;
+		u16 *addrh0 = waddr;
+		u16 *addrh1 = waddr + 2;
 
-		addrh[0] = __opcode_to_mem_thumb16(first);
-		addrh[1] = __opcode_to_mem_thumb16(second);
+		twopage = (uintaddr & ~PAGE_MASK) == PAGE_SIZE - 2;
+		if (twopage && remap)
+			addrh1 = patch_map(addr + 2, FIX_TEXT_POKE1, NULL);
+
+		*addrh0 = __opcode_to_mem_thumb16(first);
+		*addrh1 = __opcode_to_mem_thumb16(second);
+
+		if (twopage && addrh1 != addr + 2) {
+			flush_kernel_vmap_range(addrh1, 2);
+			patch_unmap(FIX_TEXT_POKE1, NULL);
+		}
 
 		size = sizeof(u32);
 	} else {
@@ -36,10 +95,16 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
 	else
 		insn = __opcode_to_mem_arm(insn);
 
-	*(u32 *)addr = insn;
+	*(u32 *)waddr = insn;
 	size = sizeof(u32);
 	}
 
+	if (waddr != addr) {
+		flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
+		patch_unmap(FIX_TEXT_POKE0, &flags);
+	} else
+		__release(&patch_lock);
+
 	flush_icache_range((uintptr_t)(addr),
 			   (uintptr_t)(addr) + size);
 }
@@ -60,16 +125,5 @@ void __kprobes patch_text(void *addr, unsigned int insn)
 		.insn = insn,
 	};
 
-	if (cache_ops_need_broadcast()) {
-		stop_machine(patch_text_stop_machine, &patch,
-			     cpu_online_mask);
-	} else {
-		bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL)
-				      && __opcode_is_thumb32(insn)
-				      && ((uintptr_t)addr & 2);
-
-		if (straddles_word)
-			stop_machine(patch_text_stop_machine, &patch, NULL);
-		else
-			__patch_text(addr, insn);
-	}
+	stop_machine(patch_text_stop_machine, &patch, NULL);
 }
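A sketch of the aliasing arithmetic behind patch_map() above (example_poke_alias is a hypothetical standalone helper): the fixmap slot remaps the physical page backing addr at a fixed virtual address, so the writable alias is that fixed address plus addr's offset within its page:

static void *example_poke_alias(void *addr)
{
	unsigned long offset = (uintptr_t)addr & ~PAGE_MASK;

	/* assumes FIX_TEXT_POKE0 currently maps the page containing addr */
	return (void *)(__fix_to_virt(FIX_TEXT_POKE0) + offset);
}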
diff --git a/arch/arm/kernel/patch.h b/arch/arm/kernel/patch.h
index b4731f2dac38..77e054c2f6cd 100644
--- a/arch/arm/kernel/patch.h
+++ b/arch/arm/kernel/patch.h
@@ -2,6 +2,16 @@
 #define _ARM_KERNEL_PATCH_H
 
 void patch_text(void *addr, unsigned int insn);
-void __patch_text(void *addr, unsigned int insn);
+void __patch_text_real(void *addr, unsigned int insn, bool remap);
+
+static inline void __patch_text(void *addr, unsigned int insn)
+{
+	__patch_text_real(addr, insn, true);
+}
+
+static inline void __patch_text_early(void *addr, unsigned int insn)
+{
+	__patch_text_real(addr, insn, false);
+}
 
 #endif
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 8e95aa47457a..b31aa73e8076 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -8,6 +8,9 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+#include <asm/pgtable.h>
+#endif
 
 #define PROC_INFO							\
 	. = ALIGN(4);							\
@@ -90,6 +93,11 @@ SECTIONS
 		_text = .;
 		HEAD_TEXT
 	}
+
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+	. = ALIGN(1<<SECTION_SHIFT);
+#endif
+
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
 			__exception_text_start = .;
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -18,19 +18,20 @@
 #include <asm/tlbflush.h>
 #include "mm.h"
 
-pte_t *fixmap_page_table;
-
 static inline void set_fixmap_pte(int idx, pte_t pte)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	set_pte_ext(fixmap_page_table + idx, pte, 0);
+	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	set_pte_ext(ptep, pte, 0);
 	local_flush_tlb_kernel_page(vaddr);
 }
 
 static inline pte_t get_fixmap_pte(unsigned long vaddr)
 {
-	unsigned long idx = __virt_to_fix(vaddr);
-	return *(fixmap_page_table + idx);
+	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	return *ptep;
 }
 
 void *kmap(struct page *page)
@@ -84,7 +85,7 @@ void *kmap_atomic(struct page *page)
 	 * With debugging enabled, kunmap_atomic forces that entry to 0.
 	 * Make sure it was indeed properly unmapped.
 	 */
-	BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
 	/*
 	 * When debugging is off, kunmap_atomic leaves the previous mapping
@@ -137,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
 
 	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
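A usage sketch for the wrappers in patch.h above, mirroring the jump_label change earlier in the series (example_patch_insn is hypothetical): the default path routes the write through the FIX_TEXT_POKE fixmap alias and stop_machine(), while the early variant writes in place because init-time text is not yet read-only:

static void example_patch_insn(void *addr, unsigned int insn, bool early)
{
	if (early)
		__patch_text_early(addr, insn);	/* init: text still RW */
	else
		patch_text(addr, insn);	/* runtime: stop_machine + fixmap */
}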
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 6ca53c338519..ba87b1b3565f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -29,6 +29,7 @@
 #include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/system_info.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
 
@@ -570,7 +571,7 @@ void __init mem_init(void)
 			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
 			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
 #endif
-			MLK(FIXADDR_START, FIXADDR_TOP),
+			MLK(FIXADDR_START, FIXADDR_END),
 			MLM(VMALLOC_START, VMALLOC_END),
 			MLM(PAGE_OFFSET, (unsigned long)high_memory),
 #ifdef CONFIG_HIGHMEM
@@ -615,7 +616,145 @@ void __init mem_init(void)
 	}
 }
 
-void free_initmem(void)
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+struct section_perm {
+	unsigned long start;
+	unsigned long end;
+	pmdval_t mask;
+	pmdval_t prot;
+	pmdval_t clear;
+};
+
+static struct section_perm nx_perms[] = {
+	/* Make page tables, etc. before _stext RW (set NX). */
+	{
+		.start	= PAGE_OFFSET,
+		.end	= (unsigned long)_stext,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+	/* Make init RW (set NX). */
+	{
+		.start	= (unsigned long)__init_begin,
+		.end	= (unsigned long)_sdata,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+#ifdef CONFIG_DEBUG_RODATA
+	/* Make rodata NX (set RO in ro_perms below). */
+	{
+		.start	= (unsigned long)__start_rodata,
+		.end	= (unsigned long)__init_begin,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+#endif
+};
+
+#ifdef CONFIG_DEBUG_RODATA
+static struct section_perm ro_perms[] = {
+	/* Make kernel code and rodata RX (set RO). */
+	{
+		.start	= (unsigned long)_stext,
+		.end	= (unsigned long)__init_begin,
+#ifdef CONFIG_ARM_LPAE
+		.mask	= ~PMD_SECT_RDONLY,
+		.prot	= PMD_SECT_RDONLY,
+#else
+		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
+		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
+		.clear	= PMD_SECT_AP_WRITE,
+#endif
+	},
+};
+#endif
+
+/*
+ * Updates section permissions only for the current mm (sections are
+ * copied into each mm). During startup, this is the init_mm. It is only
+ * safe to call this with preemption disabled, as under stop_machine().
+ */
+static inline void section_update(unsigned long addr, pmdval_t mask,
+				  pmdval_t prot)
+{
+	struct mm_struct *mm;
+	pmd_t *pmd;
+
+	mm = current->active_mm;
+	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+
+#ifdef CONFIG_ARM_LPAE
+	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#else
+	if (addr & SECTION_SIZE)
+		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
+	else
+		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#endif
+	flush_pmd_entry(pmd);
+	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
+/* Make sure extended page tables are in use. */
+static inline bool arch_has_strict_perms(void)
+{
+	if (cpu_architecture() < CPU_ARCH_ARMv6)
+		return false;
+
+	return !!(get_cr() & CR_XP);
+}
+
+#define set_section_perms(perms, field)	{				\
+	size_t i;							\
+	unsigned long addr;						\
+									\
+	if (!arch_has_strict_perms())					\
+		return;							\
+									\
+	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
+				perms[i].start, perms[i].end,		\
+				SECTION_SIZE);				\
+			continue;					\
+		}							\
+									\
+		for (addr = perms[i].start;				\
+		     addr < perms[i].end;				\
+		     addr += SECTION_SIZE)				\
+			section_update(addr, perms[i].mask,		\
+				perms[i].field);			\
+	}								\
+}
+
+static inline void fix_kernmem_perms(void)
+{
+	set_section_perms(nx_perms, prot);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+	set_section_perms(ro_perms, prot);
+}
+
+void set_kernel_text_rw(void)
+{
+	set_section_perms(ro_perms, clear);
+}
+
+void set_kernel_text_ro(void)
+{
+	set_section_perms(ro_perms, prot);
+}
+#endif /* CONFIG_DEBUG_RODATA */
+
+#else
+static inline void fix_kernmem_perms(void) { }
+#endif /* CONFIG_ARM_KERNMEM_PERMS */
+
+void free_tcmmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
 	extern char __tcm_start, __tcm_end;
@@ -623,6 +762,12 @@ void free_initmem(void)
 	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
 	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
+}
+
+void free_initmem(void)
+{
+	fix_kernmem_perms();
+	free_tcmmem();
 
 	poison_init_mem(__init_begin, __init_end - __init_begin);
 	if (!machine_is_integrator() && !machine_is_cintegrator())
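To make the mask/prot/clear triplet above concrete, this is the descriptor arithmetic section_update() applies to each 1MB section entry (example_update_desc is a hypothetical standalone helper): the new value is (old & mask) | field, where field is .prot when tightening and .clear when loosening:

static inline pmdval_t example_update_desc(pmdval_t old, pmdval_t mask,
					   pmdval_t field)
{
	/* Non-LPAE read-only text: mask clears PMD_SECT_APX and
	 * PMD_SECT_AP_WRITE, .prot sets both (kernel read-only), and
	 * set_kernel_text_rw() instead ORs in .clear = PMD_SECT_AP_WRITE. */
	return (old & mask) | field;
}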
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index f86ce1a9f525..cda7c40999b6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/fixmap.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
@@ -356,6 +357,29 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
+/*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+	unsigned long vaddr = __fix_to_virt(idx);
+	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	/* Make sure fixmap region does not exceed available allocation. */
+	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+		     FIXADDR_END);
+	BUG_ON(idx >= __end_of_fixed_addresses);
+
+	if (pgprot_val(prot))
+		set_pte_at(NULL, vaddr, pte,
+			   pfn_pte(phys >> PAGE_SHIFT, prot));
+	else
+		pte_clear(NULL, vaddr, pte);
+	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
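For orientation, a sketch of how __set_fixmap() above is reached in practice through the asm-generic wrappers used by patch.c (assuming the generic definitions set_fixmap(idx, phys) == __set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL) and clear_fixmap(idx) == __set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR); example_poke_page is hypothetical):

static void example_poke_page(struct page *page)
{
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(page));
	/* ... write through (void *)__fix_to_virt(FIX_TEXT_POKE0) ... */
	clear_fixmap(FIX_TEXT_POKE0);	/* pte_clear() + local TLB flush */
}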
@@ -1296,10 +1320,10 @@ static void __init kmap_init(void)
 #ifdef CONFIG_HIGHMEM
 	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
-
-	fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
-		FIXADDR_START, _PAGE_KERNEL_TABLE);
 #endif
+
+	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
+			_PAGE_KERNEL_TABLE);
 }
 
 static void __init map_lowmem(void)
@@ -1319,12 +1343,19 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		if (end < kernel_x_start || start >= kernel_x_end) {
+		if (end < kernel_x_start) {
 			map.pfn = __phys_to_pfn(start);
 			map.virtual = __phys_to_virt(start);
 			map.length = end - start;
 			map.type = MT_MEMORY_RWX;
 
+			create_mapping(&map);
+		} else if (start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RW;
+
 			create_mapping(&map);
 		} else {
 			/* This better cover the entire kernel */
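As a closing sketch of the lowmem policy this hunk implements (my reading of the code; example_lowmem_type is a hypothetical helper): regions wholly below the kernel image stay RWX at mapping time (they are tightened later by fix_kernmem_perms(), whose nx_perms entry covers PAGE_OFFSET to _stext), regions wholly above kernel_x_end become non-executable RW, and regions overlapping the kernel fall through to the splitting path so only text stays executable:

static unsigned int example_lowmem_type(phys_addr_t start, phys_addr_t end,
					phys_addr_t kernel_x_start,
					phys_addr_t kernel_x_end)
{
	if (end < kernel_x_start)
		return MT_MEMORY_RWX;	/* wholly below the kernel image */
	if (start >= kernel_x_end)
		return MT_MEMORY_RW;	/* wholly above it: non-executable */
	BUG();				/* overlapping regions get split */
}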