arm64 fixes for -rc3
- Fix early use of kprobes

- Fix kernel placement in kexec_file_load()

- Bump maximum number of NUMA nodes

-----BEGIN PGP SIGNATURE-----

iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAl+lLeQQHHdpbGxAa2Vy
bmVsLm9yZwAKCRC3rHDchMFjNHgjB/9RJMWwEo6TJ0JyJdBmgEy9k+F5k7zUEtNO
dmZBXt1V8Gvw2MLRKAayWLumoJCUf0ZTICJ9+wnYAKkGtKvDfuEofrEOe/W/jB8m
V2Nm7Y+UWL/D0E5+jdyGIqsPiljaZg8GCyOxN6BDuqgl/T8/3YlpSudMvlr7xm8s
F71k2u2EvSybcRFmtp9A5x0eUeWRSQtLa1+fWmpyAPAX64YJ9bh2w3/g5SecocUK
Ra8H91XO5BT2sHsDDQe67iUfZz9Y1N1UbNiuzCZIL7+xTcQ6DKw4JJ/2Z5BfkH0D
04THZZqYt5AjYQmUULMmPcbSzMp4E30s5dmckevq8E+LG0imLDYp
=w7Ip
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Here's the weekly batch of fixes for arm64. Not an awful lot here,
  but there are still a few unresolved issues relating to CPU hotplug,
  RCU and IRQ tracing that I hope to queue fixes for next week.

  Summary:

   - Fix early use of kprobes

   - Fix kernel placement in kexec_file_load()

   - Bump maximum number of NUMA nodes"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: kexec_file: try more regions if loading segments fails
  arm64: kprobes: Use BRK instead of single-step when executing instructions out-of-line
  arm64: NUMA: Kconfig: Increase NODES_SHIFT to 4
commit 30f3f68e27
@@ -1002,7 +1002,7 @@ config NUMA
 config NODES_SHIFT
 	int "Maximum NUMA Nodes (as a power of 2)"
 	range 1 10
-	default "2"
+	default "4"
 	depends on NEED_MULTIPLE_NODES
 	help
 	  Specify the maximum number of NUMA Nodes available on the target

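NODES_SHIFT is the base-2 logarithm of the node limit, so moving the default from "2" to "4" raises the maximum from 4 to 16 NUMA nodes. A minimal sketch of how the limit follows from the Kconfig value (the derivation below is written out by hand, not copied from the kernel headers):

    /* Sketch: CONFIG_NODES_SHIFT comes from the Kconfig default above. */
    #define CONFIG_NODES_SHIFT	4
    #define NODES_SHIFT		CONFIG_NODES_SHIFT
    #define MAX_NUMNODES	(1 << NODES_SHIFT)	/* 16 nodes with the new default */
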
@@ -10,6 +10,7 @@
  * #imm16 values used for BRK instruction generation
  * 0x004: for installing kprobes
  * 0x005: for installing uprobes
+ * 0x006: for kprobe software single-step
  * Allowed values for kgdb are 0x400 - 0x7ff
  * 0x100: for triggering a fault on purpose (reserved)
  * 0x400: for dynamic BRK instruction

@@ -19,6 +20,7 @@
  */
 #define KPROBES_BRK_IMM		0x004
 #define UPROBES_BRK_IMM		0x005
+#define KPROBES_BRK_SS_IMM	0x006
 #define FAULT_BRK_IMM		0x100
 #define KGDB_DYN_DBG_BRK_IMM	0x400
 #define KGDB_COMPILED_DBG_BRK_IMM	0x401

@@ -53,6 +53,7 @@
 
 /* kprobes BRK opcodes with ESR encoding */
 #define BRK64_OPCODE_KPROBES	(AARCH64_BREAK_MON | (KPROBES_BRK_IMM << 5))
+#define BRK64_OPCODE_KPROBES_SS	(AARCH64_BREAK_MON | (KPROBES_BRK_SS_IMM << 5))
 /* uprobes BRK opcodes with ESR encoding */
 #define BRK64_OPCODE_UPROBES	(AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5))
 
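BRK #imm16 places its 16-bit immediate in bits [20:5] of the instruction encoding, which is why the new BRK64_OPCODE_KPROBES_SS opcode is built by OR-ing KPROBES_BRK_SS_IMM, shifted left by 5, into AARCH64_BREAK_MON. A minimal sketch of that encoding and its inverse, with the base value written out by hand rather than taken from the kernel headers:

    #include <stdint.h>

    /* BRK #imm16: base encoding 0xd4200000, imm16 lives in bits [20:5]. */
    static inline uint32_t brk_opcode(uint16_t imm16)
    {
    	return 0xd4200000u | ((uint32_t)imm16 << 5);
    }

    /* Recover the immediate from a trapped BRK opcode. */
    static inline uint16_t brk_imm(uint32_t opcode)
    {
    	return (opcode >> 5) & 0xffff;
    }

For example, brk_opcode(0x006) yields the single-step slot breakpoint, and a debug-exception handler can dispatch on brk_imm() to find out which subsystem armed the BRK.
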
@@ -16,7 +16,7 @@
 #include <linux/percpu.h>
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
-#define MAX_INSN_SIZE			1
+#define MAX_INSN_SIZE			2
 
 #define flush_insn_slot(p)		do { } while (0)
 #define kretprobe_blacklist_size	0

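MAX_INSN_SIZE grows from 1 to 2 because the out-of-line (XOL) slot now holds two words: the copied instruction followed by a BRK that traps straight back into the kprobes code, replacing the hardware single-step that used to follow the copied instruction. A minimal sketch of the new slot layout, with an illustrative helper name and the BRK value computed by hand:

    #include <stdint.h>

    #define BRK64_OPCODE_KPROBES_SS	0xd42000c0u	/* BRK #0x006, sketch value */

    /* XOL slot: [ copied instruction ][ BRK #KPROBES_BRK_SS_IMM ] */
    static void fill_xol_slot(uint32_t *slot, uint32_t probed_insn)
    {
    	slot[0] = probed_insn;			/* executed out of line */
    	slot[1] = BRK64_OPCODE_KPROBES_SS;	/* traps back into the kprobe handler */
    }
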
@@ -43,7 +43,7 @@ static void *image_load(struct kimage *image,
 	u64 flags, value;
 	bool be_image, be_kernel;
 	struct kexec_buf kbuf;
-	unsigned long text_offset;
+	unsigned long text_offset, kernel_segment_number;
 	struct kexec_segment *kernel_segment;
 	int ret;
 
@@ -88,11 +88,37 @@ static void *image_load(struct kimage *image,
 	/* Adjust kernel segment with TEXT_OFFSET */
 	kbuf.memsz += text_offset;
 
-	ret = kexec_add_buffer(&kbuf);
-	if (ret)
-		return ERR_PTR(ret);
+	kernel_segment_number = image->nr_segments;
 
-	kernel_segment = &image->segment[image->nr_segments - 1];
+	/*
+	 * The location of the kernel segment may make it impossible to satisfy
+	 * the other segment requirements, so we try repeatedly to find a
+	 * location that will work.
+	 */
+	while ((ret = kexec_add_buffer(&kbuf)) == 0) {
+		/* Try to load additional data */
+		kernel_segment = &image->segment[kernel_segment_number];
+		ret = load_other_segments(image, kernel_segment->mem,
+					  kernel_segment->memsz, initrd,
+					  initrd_len, cmdline);
+		if (!ret)
+			break;
+
+		/*
+		 * We couldn't find space for the other segments; erase the
+		 * kernel segment and try the next available hole.
+		 */
+		image->nr_segments -= 1;
+		kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz;
+		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+	}
+
+	if (ret) {
+		pr_err("Could not find any suitable kernel location!");
+		return ERR_PTR(ret);
+	}
+
+	kernel_segment = &image->segment[kernel_segment_number];
 	kernel_segment->mem += text_offset;
 	kernel_segment->memsz -= text_offset;
 	image->start = kernel_segment->mem;

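The loop above turns kernel placement into a retry: place the kernel, try to load the remaining segments (initrd, DTB, ELF headers), and if they do not fit, drop the kernel segment and search again starting above the hole that was just tried. A minimal standalone sketch of the pattern, with hypothetical place_kernel()/place_other() callbacks standing in for kexec_add_buffer() and load_other_segments():

    struct placement { unsigned long base, size; };

    static int place_with_retry(struct placement *kernel, unsigned long *search_min,
    			    int (*place_kernel)(struct placement *, unsigned long),
    			    int (*place_other)(const struct placement *))
    {
    	int ret;

    	while ((ret = place_kernel(kernel, *search_min)) == 0) {
    		ret = place_other(kernel);	/* initrd, DTB, ELF headers */
    		if (!ret)
    			return 0;		/* everything fits */
    		/* discard this placement and search above the failed hole */
    		*search_min = kernel->base + kernel->size;
    	}
    	return ret;				/* no suitable location found */
    }
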
@@ -101,12 +127,7 @@ static void *image_load(struct kimage *image,
 			      kernel_segment->mem, kbuf.bufsz,
 			      kernel_segment->memsz);
 
-	/* Load additional data */
-	ret = load_other_segments(image,
-				kernel_segment->mem, kernel_segment->memsz,
-				initrd, initrd_len, cmdline);
-
-	return ERR_PTR(ret);
+	return 0;
 }
 
 #ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG

@@ -240,6 +240,11 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
 	return ret;
 }
 
+/*
+ * Tries to add the initrd and DTB to the image. If it is not possible to find
+ * valid locations, this function will undo changes to the image and return non
+ * zero.
+ */
 int load_other_segments(struct kimage *image,
 			unsigned long kernel_load_addr,
 			unsigned long kernel_size,

@@ -248,7 +253,8 @@ int load_other_segments(struct kimage *image,
 {
 	struct kexec_buf kbuf;
 	void *headers, *dtb = NULL;
-	unsigned long headers_sz, initrd_load_addr = 0, dtb_len;
+	unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
+		      orig_segments = image->nr_segments;
 	int ret = 0;
 
 	kbuf.image = image;

@@ -334,6 +340,7 @@ int load_other_segments(struct kimage *image,
 	return 0;
 
 out_err:
+	image->nr_segments = orig_segments;
 	vfree(dtb);
 	return ret;
 }

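Because image_load() may now call load_other_segments() several times, the function snapshots image->nr_segments on entry and restores it on the error path, leaving the image exactly as it found it for the next retry. A minimal sketch of that record-and-roll-back idiom (the struct and helper names are illustrative only):

    struct image_state { unsigned int nr_segments; };

    static int try_add_segments(struct image_state *image);	/* hypothetical helper */

    static int add_optional_segments(struct image_state *image)
    {
    	unsigned int orig_segments = image->nr_segments;	/* snapshot on entry */
    	int ret = try_add_segments(image);

    	if (ret)
    		image->nr_segments = orig_segments;	/* undo partial additions */
    	return ret;
    }
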
@@ -36,25 +36,16 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
-static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
-{
-	void *addrs[1];
-	u32 insns[1];
-
-	addrs[0] = addr;
-	insns[0] = opcode;
-
-	return aarch64_insn_patch_text(addrs, insns, 1);
-}
-
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
-	/* prepare insn slot */
-	patch_text(p->ainsn.api.insn, p->opcode);
+	kprobe_opcode_t *addr = p->ainsn.api.insn;
+	void *addrs[] = {addr, addr + 1};
+	u32 insns[] = {p->opcode, BRK64_OPCODE_KPROBES_SS};
 
-	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
-			   (uintptr_t) (p->ainsn.api.insn) +
-			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	/* prepare insn slot */
+	aarch64_insn_patch_text(addrs, insns, 2);
+
+	flush_icache_range((uintptr_t)addr, (uintptr_t)(addr + MAX_INSN_SIZE));
 
 	/*
 	 * Needs restoring of return address after stepping xol.

@@ -128,13 +119,18 @@ void *alloc_insn_page(void)
 /* arm kprobe: install breakpoint in text */
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
-	patch_text(p->addr, BRK64_OPCODE_KPROBES);
+	void *addr = p->addr;
+	u32 insn = BRK64_OPCODE_KPROBES;
+
+	aarch64_insn_patch_text(&addr, &insn, 1);
 }
 
 /* disarm kprobe: remove breakpoint from text */
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	patch_text(p->addr, p->opcode);
+	void *addr = p->addr;
+
+	aarch64_insn_patch_text(&addr, &p->opcode, 1);
 }
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)

@@ -163,20 +159,15 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
 }
 
 /*
- * Interrupts need to be disabled before single-step mode is set, and not
- * reenabled until after single-step mode ends.
- * Without disabling interrupt on local CPU, there is a chance of
- * interrupt occurrence in the period of exception return and start of
- * out-of-line single-step, that result in wrongly single stepping
- * into the interrupt handler.
+ * Mask all of DAIF while executing the instruction out-of-line, to keep things
+ * simple and avoid nesting exceptions. Interrupts do have to be disabled since
+ * the kprobe state is per-CPU and doesn't get migrated.
  */
 static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
 						struct pt_regs *regs)
 {
 	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
-	regs->pstate |= PSR_I_BIT;
-	/* Unmask PSTATE.D for enabling software step exceptions. */
-	regs->pstate &= ~PSR_D_BIT;
+	regs->pstate |= DAIF_MASK;
 }
 
 static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,

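With BRK-based stepping there is no need to unmask PSTATE.D for a hardware step exception; the handler simply saves the DAIF bits from the saved pstate, masks all four while the slot runs, and restores them afterwards (interrupts must stay off because the kprobe state is per-CPU). A minimal sketch of the save/mask/restore pattern, with the DAIF mask value written out by hand rather than taken from the kernel headers:

    #include <stdint.h>

    #define DAIF_MASK	0x3c0ull	/* PSTATE.{D,A,I,F}, bits 9:6 (sketch value) */

    struct ctl { uint64_t saved_irqflag; };

    static void daif_save_and_mask(struct ctl *kcb, uint64_t *pstate)
    {
    	kcb->saved_irqflag = *pstate & DAIF_MASK;	/* remember caller's mask bits */
    	*pstate |= DAIF_MASK;				/* mask D, A, I and F */
    }

    static void daif_restore(const struct ctl *kcb, uint64_t *pstate)
    {
    	*pstate &= ~DAIF_MASK;
    	*pstate |= kcb->saved_irqflag;
    }
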
@@ -219,10 +210,7 @@ static void __kprobes setup_singlestep(struct kprobe *p,
 		slot = (unsigned long)p->ainsn.api.insn;
 
-		set_ss_context(kcb, slot);	/* mark pending ss */
-
-		/* IRQs and single stepping do not mix well. */
 		kprobes_save_local_irqflag(kcb, regs);
-		kernel_enable_single_step(regs);
 		instruction_pointer_set(regs, slot);
 	} else {
 		/* insn simulation */

|
|||
}
|
||||
/* call post handler */
|
||||
kcb->kprobe_status = KPROBE_HIT_SSDONE;
|
||||
if (cur->post_handler) {
|
||||
/* post_handler can hit breakpoint and single step
|
||||
* again, so we enable D-flag for recursive exception.
|
||||
*/
|
||||
if (cur->post_handler)
|
||||
cur->post_handler(cur, regs, 0);
|
||||
}
|
||||
|
||||
reset_current_kprobe();
|
||||
}
|
||||
|
@ -302,8 +286,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
|
|||
if (!instruction_pointer(regs))
|
||||
BUG();
|
||||
|
||||
kernel_disable_single_step();
|
||||
|
||||
if (kcb->kprobe_status == KPROBE_REENTER)
|
||||
restore_previous_kprobe(kcb);
|
||||
else
|
||||
|
@ -365,10 +347,6 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
|
|||
* pre-handler and it returned non-zero, it will
|
||||
* modify the execution path and no need to single
|
||||
* stepping. Let's just reset current kprobe and exit.
|
||||
*
|
||||
* pre_handler can hit a breakpoint and can step thru
|
||||
* before return, keep PSTATE D-flag enabled until
|
||||
* pre_handler return back.
|
||||
*/
|
||||
if (!p->pre_handler || !p->pre_handler(p, regs)) {
|
||||
setup_singlestep(p, regs, kcb, 0);
|
||||
|
@ -399,7 +377,7 @@ kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
|
|||
}
|
||||
|
||||
static int __kprobes
|
||||
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
|
||||
kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
|
||||
{
|
||||
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
||||
int retval;
|
||||
|
@ -409,16 +387,15 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
|
|||
|
||||
if (retval == DBG_HOOK_HANDLED) {
|
||||
kprobes_restore_local_irqflag(kcb, regs);
|
||||
kernel_disable_single_step();
|
||||
|
||||
post_kprobe_handler(kcb, regs);
|
||||
}
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
static struct step_hook kprobes_step_hook = {
|
||||
.fn = kprobe_single_step_handler,
|
||||
static struct break_hook kprobes_break_ss_hook = {
|
||||
.imm = KPROBES_BRK_SS_IMM,
|
||||
.fn = kprobe_breakpoint_ss_handler,
|
||||
};
|
||||
|
||||
static int __kprobes
|
||||
|
@@ -486,7 +463,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
 int __init arch_init_kprobes(void)
 {
 	register_kernel_break_hook(&kprobes_break_hook);
-	register_kernel_step_hook(&kprobes_step_hook);
+	register_kernel_break_hook(&kprobes_break_ss_hook);
 
 	return 0;
 }