Merge branch 'for-next/misc' into for-next/core

* for-next/misc:
  : Miscellaneous patches
  arm64: vmlinux.lds.S: Drop redundant *.init.rodata.*
  kasan: arm64: set TCR_EL1.TBID1 when enabled
  arm64: mte: optimize asynchronous tag check fault flag check
  arm64/mm: add fallback option to allocate virtually contiguous memory
  arm64/smp: Drop the macro S(x,s)
  arm64: consistently use reserved_pg_dir
  arm64: kprobes: Remove redundant kprobe_step_ctx

# Conflicts:
#	arch/arm64/kernel/vmlinux.lds.S
Catalin Marinas 2020-12-09 18:04:48 +00:00
commit ba4259a6f8
15 changed files with 45 additions and 83 deletions


@@ -15,10 +15,10 @@
 	.macro	__uaccess_ttbr0_disable, tmp1
 	mrs	\tmp1, ttbr1_el1			// swapper_pg_dir
 	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
-	sub	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE	// reserved_ttbr0 just before swapper_pg_dir
+	sub	\tmp1, \tmp1, #PAGE_SIZE		// reserved_pg_dir just before swapper_pg_dir
 	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
 	isb
-	add	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE
+	add	\tmp1, \tmp1, #PAGE_SIZE
 	msr	ttbr1_el1, \tmp1			// set reserved ASID
 	isb
 	.endm


@@ -89,12 +89,6 @@
 #define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
 #define IDMAP_DIR_SIZE		(IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-#define RESERVED_TTBR0_SIZE	(PAGE_SIZE)
-#else
-#define RESERVED_TTBR0_SIZE	(0)
-#endif
-
 /* Initial memory map size */
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_BLOCK_SHIFT	SECTION_SHIFT


@@ -28,18 +28,11 @@ struct prev_kprobe {
 	unsigned int status;
 };
 
-/* Single step context for kprobe */
-struct kprobe_step_ctx {
-	unsigned long ss_pending;
-	unsigned long match_addr;
-};
-
 /* per-cpu kprobe control block */
 struct kprobe_ctlblk {
 	unsigned int kprobe_status;
 	unsigned long saved_irqflag;
 	struct prev_kprobe prev_kprobe;
-	struct kprobe_step_ctx ss_ctx;
 };
 
 void arch_remove_kprobe(struct kprobe *);


@@ -36,11 +36,11 @@ static inline void contextidr_thread_switch(struct task_struct *next)
 }
 
 /*
- * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
+ * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-	unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
+	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 
 	write_sysreg(ttbr, ttbr0_el1);
 	isb();
@@ -195,7 +195,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
 		return;
 
 	if (mm == &init_mm)
-		ttbr = __pa_symbol(empty_zero_page);
+		ttbr = __pa_symbol(reserved_pg_dir);
 	else
 		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;


@@ -260,6 +260,7 @@
 #define TCR_TBI1		(UL(1) << 38)
 #define TCR_HA			(UL(1) << 39)
 #define TCR_HD			(UL(1) << 40)
+#define TCR_TBID1		(UL(1) << 52)
 #define TCR_NFD0		(UL(1) << 53)
 #define TCR_NFD1		(UL(1) << 54)
 #define TCR_E0PD0		(UL(1) << 55)


@@ -519,6 +519,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_end[];
 extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
+extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
 
 extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);


@@ -86,8 +86,8 @@ static inline void __uaccess_ttbr0_disable(void)
 	local_irq_save(flags);
 	ttbr = read_sysreg(ttbr1_el1);
 	ttbr &= ~TTBR_ASID_MASK;
-	/* reserved_ttbr0 placed before swapper_pg_dir */
-	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
+	/* reserved_pg_dir placed before swapper_pg_dir */
+	write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
 	isb();
 	/* Set reserved ASID */
 	write_sysreg(ttbr, ttbr1_el1);


@@ -795,9 +795,10 @@ SYM_CODE_END(ret_to_user)
  */
 	.pushsection ".entry.tramp.text", "ax"
 
+	// Move from tramp_pg_dir to swapper_pg_dir
 	.macro tramp_map_kernel, tmp
 	mrs	\tmp, ttbr1_el1
-	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
+	add	\tmp, \tmp, #(2 * PAGE_SIZE)
 	bic	\tmp, \tmp, #USER_ASID_FLAG
 	msr	ttbr1_el1, \tmp
 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
@@ -814,9 +815,10 @@ alternative_else_nop_endif
 #endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
 	.endm
 
+	// Move from swapper_pg_dir to tramp_pg_dir
 	.macro tramp_unmap_kernel, tmp
 	mrs	\tmp, ttbr1_el1
-	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
+	sub	\tmp, \tmp, #(2 * PAGE_SIZE)
 	orr	\tmp, \tmp, #USER_ASID_FLAG
 	msr	ttbr1_el1, \tmp
 	/*


@@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 static void __kprobes
-post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
 
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
@@ -68,7 +68,7 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
 		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
 
 	/* single step simulated, now go for post processing */
-	post_kprobe_handler(kcb, regs);
+	post_kprobe_handler(p, kcb, regs);
 }
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -177,19 +177,6 @@ static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
 	regs->pstate |= kcb->saved_irqflag;
 }
 
-static void __kprobes
-set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
-{
-	kcb->ss_ctx.ss_pending = true;
-	kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
-}
-
-static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
-{
-	kcb->ss_ctx.ss_pending = false;
-	kcb->ss_ctx.match_addr = 0;
-}
-
 static void __kprobes setup_singlestep(struct kprobe *p,
 				       struct pt_regs *regs,
 				       struct kprobe_ctlblk *kcb, int reenter)
@@ -209,7 +196,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
 		/* prepare for single stepping */
 		slot = (unsigned long)p->ainsn.api.insn;
 
-		set_ss_context(kcb, slot);	/* mark pending ss */
 		kprobes_save_local_irqflag(kcb, regs);
 		instruction_pointer_set(regs, slot);
 	} else {
@@ -243,13 +229,8 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
 }
 
 static void __kprobes
-post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
 {
-	struct kprobe *cur = kprobe_running();
-
-	if (!cur)
-		return;
-
 	/* return addr restore if non-branching insn */
 	if (cur->ainsn.api.restore != 0)
 		instruction_pointer_set(regs, cur->ainsn.api.restore);
@@ -364,33 +345,23 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
 	 */
 }
 
-static int __kprobes
-kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
-{
-	if ((kcb->ss_ctx.ss_pending)
-	    && (kcb->ss_ctx.match_addr == addr)) {
-		clear_ss_context(kcb);	/* clear pending ss */
-		return DBG_HOOK_HANDLED;
-	}
-	/* not ours, kprobes should ignore it */
-	return DBG_HOOK_ERROR;
-}
-
 static int __kprobes
 kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-	int retval;
+	unsigned long addr = instruction_pointer(regs);
+	struct kprobe *cur = kprobe_running();
 
-	/* return error if this is not our step */
-	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
-
-	if (retval == DBG_HOOK_HANDLED) {
+	if (cur && (kcb->kprobe_status == KPROBE_HIT_SS)
+	    && ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
 		kprobes_restore_local_irqflag(kcb, regs);
-		post_kprobe_handler(kcb, regs);
+		post_kprobe_handler(cur, kcb, regs);
+
+		return DBG_HOOK_HANDLED;
 	}
 
-	return retval;
+	/* not ours, kprobes should ignore it */
+	return DBG_HOOK_ERROR;
 }
 
 static struct break_hook kprobes_break_ss_hook = {


@@ -366,7 +366,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 	 * faults in case uaccess_enable() is inadvertently called by the init
 	 * thread.
	 */
-	init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
+	init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir);
 #endif
 
 	if (boot_args[1] || boot_args[2] || boot_args[3]) {


@@ -786,14 +786,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 }
 
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
-#define S(x,s)	[x] = s
-	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
-	S(IPI_CALL_FUNC, "Function call interrupts"),
-	S(IPI_CPU_STOP, "CPU stop interrupts"),
-	S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
-	S(IPI_TIMER, "Timer broadcast interrupts"),
-	S(IPI_IRQ_WORK, "IRQ work interrupts"),
-	S(IPI_WAKEUP, "CPU wake-up interrupts"),
+	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
+	[IPI_CALL_FUNC]		= "Function call interrupts",
+	[IPI_CPU_STOP]		= "CPU stop interrupts",
+	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
+	[IPI_TIMER]		= "Timer broadcast interrupts",
+	[IPI_IRQ_WORK]		= "IRQ work interrupts",
+	[IPI_WAKEUP]		= "CPU wake-up interrupts",
 };
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);


@@ -123,7 +123,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 	local_daif_restore(DAIF_PROCCTX);
 	user_exit();
 
-	if (system_supports_mte() && (flags & _TIF_MTE_ASYNC_FAULT)) {
+	if (flags & _TIF_MTE_ASYNC_FAULT) {
 		/*
 		 * Process the asynchronous tag check fault before the actual
 		 * syscall. do_notify_resume() will send a signal to userspace


@@ -164,13 +164,11 @@ SECTIONS
 	. += PAGE_SIZE;
 #endif
 
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-	reserved_ttbr0 = .;
-	. += RESERVED_TTBR0_SIZE;
-#endif
+	reserved_pg_dir = .;
+	. += PAGE_SIZE;
+
 	swapper_pg_dir = .;
 	. += PAGE_SIZE;
-	swapper_pg_end = .;
 
 	. = ALIGN(SEGMENT_ALIGN);
 	__init_begin = .;
@@ -201,7 +199,7 @@ SECTIONS
 		INIT_CALLS
 		CON_INITCALL
 		INIT_RAM_FS
-		*(.init.altinstructions .init.rodata.* .init.bss)	/* from the EFI stub */
+		*(.init.altinstructions .init.bss)	/* from the EFI stub */
 	}
 	.exit.data : {
 		EXIT_DATA


@@ -1127,8 +1127,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 			void *p = NULL;
 
 			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
-			if (!p)
-				return -ENOMEM;
+			if (!p) {
+				if (vmemmap_populate_basepages(addr, next, node, altmap))
+					return -ENOMEM;
+				continue;
+			}
 
 			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
 		} else


@@ -40,7 +40,7 @@
 #define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
 
 #ifdef CONFIG_KASAN_SW_TAGS
-#define TCR_KASAN_FLAGS TCR_TBI1
+#define TCR_KASAN_FLAGS TCR_TBI1 | TCR_TBID1
 #else
 #define TCR_KASAN_FLAGS 0
 #endif
@@ -168,7 +168,7 @@ SYM_FUNC_END(cpu_do_resume)
 	.pushsection ".idmap.text", "awx"
 
 .macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
-	adrp	\tmp1, empty_zero_page
+	adrp	\tmp1, reserved_pg_dir
 	phys_to_ttbr \tmp2, \tmp1
 	offset_ttbr1 \tmp2, \tmp1
 	msr	ttbr1_el1, \tmp2