uprobes: Export 'struct return_instance', introduce arch_uretprobe_is_alive()
Add the new "weak" helper, arch_uretprobe_is_alive(), used by the next
patches. It should return true if this return_instance is still valid.
The arch-agnostic version just always returns true.

The patch exports "struct return_instance" for the architectures which
want to override this hook. We can also clean up prepare_uretprobe() if
we pass the new return_instance to arch_uretprobe_hijack_return_addr().

Tested-by: Pratyush Anand <panand@redhat.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Acked-by: Anton Arapov <arapov@gmail.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20150721134016.GA4762@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 97da89767d
parent a83cfeb921
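For architectures that override the weak hook, the point is to let arch code
decide whether the stack frame that pushed a given return_instance still
exists. A minimal sketch of such an override, assuming a hypothetical
ret->stack field holding the user stack pointer recorded when the return
address was hijacked (no such field exists in this patch):

	#include <linux/uprobes.h>
	#include <linux/ptrace.h>

	/*
	 * Sketch only: if the task's stack pointer has moved above the
	 * snapshot taken at hijack time, the frame that owned this
	 * instance is gone (e.g. after longjmp()), so the instance is
	 * no longer alive. 'ret->stack' is hypothetical here.
	 */
	bool arch_uretprobe_is_alive(struct return_instance *ret, struct pt_regs *regs)
	{
		return user_stack_pointer(regs) <= ret->stack;
	}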
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -92,6 +92,15 @@ struct uprobe_task {
 	unsigned int			depth;
 };
 
+struct return_instance {
+	struct uprobe		*uprobe;
+	unsigned long		func;
+	unsigned long		orig_ret_vaddr; /* original return address */
+	bool			chained;	/* true, if instance is nested */
+
+	struct return_instance	*next;		/* keep as stack */
+};
+
 struct xol_area;
 
 struct uprobes_state {
@@ -128,6 +137,7 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
 extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
 extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
+extern bool arch_uretprobe_is_alive(struct return_instance *ret, struct pt_regs *regs);
 extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
 extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
 				  void *src, unsigned long len);
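The cleanup suggested in the commit message would thread the new structure
through the hijack hook itself, so arch code can stash per-instance state in
one place. A hedged sketch of what that signature could become (not part of
this patch; the extra 'ri' parameter is hypothetical):

	/*
	 * Hypothetical follow-up: pass the return_instance down so the
	 * arch can record, e.g., the stack pointer it later compares in
	 * arch_uretprobe_is_alive(); prepare_uretprobe() would then no
	 * longer need to juggle the returned value by hand.
	 */
	extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
							       struct pt_regs *regs,
							       struct return_instance *ri);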
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -86,15 +86,6 @@ struct uprobe {
 	struct arch_uprobe	arch;
 };
 
-struct return_instance {
-	struct uprobe		*uprobe;
-	unsigned long		func;
-	unsigned long		orig_ret_vaddr; /* original return address */
-	bool			chained;	/* true, if instance is nested */
-
-	struct return_instance	*next;		/* keep as stack */
-};
-
 /*
  * Execute out of line area: anonymous executable mapping installed
  * by the probed task to execute the copy of the original instruction
@@ -1818,6 +1809,11 @@ bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
 	return false;
 }
 
+bool __weak arch_uretprobe_is_alive(struct return_instance *ret, struct pt_regs *regs)
+{
+	return true;
+}
+
 /*
  * Run handler and ask thread to singlestep.
  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
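To see how "the next patches" are expected to consume the hook: the
trampoline handler can walk the per-task return_instance stack (the 'next'
field keeps it as a stack) and skip entries whose frames are dead. An
illustrative helper, not code added by this commit (the name is made up):

	/*
	 * Illustrative consumer: find the first live instance. With the
	 * weak default always returning true, the loop stops at the top
	 * of the stack, so behaviour is unchanged on architectures that
	 * do not override the hook.
	 */
	static struct return_instance *
	find_live_instance(struct return_instance *ri, struct pt_regs *regs)
	{
		while (ri && !arch_uretprobe_is_alive(ri, regs))
			ri = ri->next;

		return ri;
	}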