kprobes: support kretprobe blacklist
Introduce architecture-dependent kretprobe blacklists to prohibit users from inserting return probes on functions in which kprobes can be inserted but kretprobes cannot.

This patch also removes the "__kprobes" mark from "__switch_to" on x86_64 and registers "__switch_to" in the x86-64 blacklist instead, because that mark was there only to keep users from inserting a kretprobe.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 49dce689ad
commit f438d914b2
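To illustrate the user-visible effect, here is a minimal sketch of a test module (not part of this commit; the module layout and maxactive value are illustrative only). It tries to register a return probe on __switch_to, which this patch blacklists on x86-64, so register_kretprobe() should now return -EINVAL instead of silently arming a probe that corrupts the return path.

/*
 * Minimal demo module, not part of this commit: with the blacklist in
 * place, registering a return probe on "__switch_to" (blacklisted on
 * x86-64 by this patch) should fail with -EINVAL.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int demo_ret_handler(struct kretprobe_instance *ri,
			    struct pt_regs *regs)
{
	return 0;
}

static struct kretprobe demo_rp = {
	.kp.symbol_name	= "__switch_to",
	.handler	= demo_ret_handler,
	.maxactive	= 16,	/* illustrative value */
};

static int __init demo_init(void)
{
	int ret = register_kretprobe(&demo_rp);

	/* Expect -EINVAL once __switch_to is on the kretprobe blacklist. */
	printk(KERN_INFO "register_kretprobe(__switch_to) = %d\n", ret);
	return ret;
}

static void __exit demo_exit(void)
{
	unregister_kretprobe(&demo_rp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");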
@@ -22,6 +22,8 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe);
 static unsigned long kprobe_status;
 static struct pt_regs jprobe_saved_regs;
 
+struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	int ret = 0;
@@ -40,6 +40,8 @@ extern void jprobe_inst_return(void);
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
+struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
+
 enum instruction_type {A, I, M, F, B, L, X, u};
 static enum instruction_type bundle_encoding[32][3] = {
 	{ M, I, I },			/* 00 */
@@ -38,6 +38,8 @@
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
+struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	int ret = 0;
@@ -33,6 +33,8 @@
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
+struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	/* Make sure the probe isn't going on a difficult instruction */
@@ -42,6 +42,8 @@
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
+struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	p->ainsn.insn[0] = *p->addr;
@@ -41,6 +41,13 @@ void jprobe_return_end(void);
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
+struct kretprobe_blackpoint kretprobe_blacklist[] = {
+	{"__switch_to", }, /* This function switches only current task, but
+			      doesn't switch kernel stack.*/
+	{NULL, NULL}	/* Terminator */
+};
+const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
+
 /* insert a jmp code */
 static __always_inline void set_jmp_op(void *from, void *to)
 {
@@ -48,6 +48,13 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p);
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
+struct kretprobe_blackpoint kretprobe_blacklist[] = {
+	{"__switch_to", }, /* This function switches only current task, but
+			      doesn't switch kernel stack.*/
+	{NULL, NULL}	/* Terminator */
+};
+const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
+
 /*
  * returns non-zero if opcode modifies the interrupt flag.
  */
@@ -581,7 +581,7 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
  *
  * Kprobes not supported here. Set the probe on schedule instead.
  */
-__kprobes struct task_struct *
+struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev = &prev_p->thread,
@@ -17,6 +17,8 @@ typedef u16 kprobe_opcode_t;
 #define BREAKPOINT_INSTRUCTION	0xd673	/* breakpoint */
 #define MAX_INSN_SIZE		2
 
+#define kretprobe_blacklist_size 0
+
 #define arch_remove_kprobe(p)	do { } while (0)
 
 /* Architecture specific copy of original instruction */
@@ -83,6 +83,7 @@ struct kprobe_ctlblk {
 };
 
 #define ARCH_SUPPORTS_KRETPROBES
+#define kretprobe_blacklist_size 0
 
 #define SLOT0_OPCODE_SHIFT	(37)
 #define SLOT1_p1_OPCODE_SHIFT	(37 - (64-46))
@@ -82,6 +82,7 @@ typedef unsigned int kprobe_opcode_t;
 
 #define ARCH_SUPPORTS_KRETPROBES
 #define flush_insn_slot(p)	do { } while (0)
+#define kretprobe_blacklist_size 0
 
 void kretprobe_trampoline(void);
 extern void arch_remove_kprobe(struct kprobe *p);
@@ -47,6 +47,7 @@ typedef u16 kprobe_opcode_t;
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
 #define ARCH_SUPPORTS_KRETPROBES
+#define kretprobe_blacklist_size 0
 
 #define KPROBE_SWAP_INST	0x10
 
@@ -10,6 +10,8 @@ typedef u32 kprobe_opcode_t;
 #define BREAKPOINT_INSTRUCTION_2	0x91d02071 /* ta 0x71 */
 #define MAX_INSN_SIZE 2
 
+#define kretprobe_blacklist_size 0
+
 #define arch_remove_kprobe(p)	do {} while (0)
 
 #define flush_insn_slot(p)	\
@@ -45,6 +45,8 @@ typedef u8 kprobe_opcode_t;
 #define ARCH_SUPPORTS_KRETPROBES
 #define flush_insn_slot(p)	do { } while (0)
 
+extern const int kretprobe_blacklist_size;
+
 void arch_remove_kprobe(struct kprobe *p);
 void kretprobe_trampoline(void);
 
@@ -42,6 +42,7 @@ typedef u8 kprobe_opcode_t;
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
 #define ARCH_SUPPORTS_KRETPROBES
+extern const int kretprobe_blacklist_size;
 
 void kretprobe_trampoline(void);
 extern void arch_remove_kprobe(struct kprobe *p);
@@ -166,6 +166,12 @@ struct kretprobe_instance {
 	struct task_struct *task;
 };
 
+struct kretprobe_blackpoint {
+	const char *name;
+	void *addr;
+};
+extern struct kretprobe_blackpoint kretprobe_blacklist[];
+
 static inline void kretprobe_assert(struct kretprobe_instance *ri,
 	unsigned long orig_ret_address, unsigned long trampoline_address)
 {
@@ -716,6 +716,18 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 	int ret = 0;
 	struct kretprobe_instance *inst;
 	int i;
+	void *addr = rp->kp.addr;
+
+	if (kretprobe_blacklist_size) {
+		if (addr == NULL)
+			kprobe_lookup_name(rp->kp.symbol_name, addr);
+		addr += rp->kp.offset;
+
+		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
+			if (kretprobe_blacklist[i].addr == addr)
+				return -EINVAL;
+		}
+	}
 
 	rp->kp.pre_handler = pre_handler_kretprobe;
 	rp->kp.post_handler = NULL;
@@ -794,6 +806,17 @@ static int __init init_kprobes(void)
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
 	}
 
+	if (kretprobe_blacklist_size) {
+		/* lookup the function address from its name */
+		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
+			kprobe_lookup_name(kretprobe_blacklist[i].name,
+					   kretprobe_blacklist[i].addr);
+			if (!kretprobe_blacklist[i].addr)
+				printk("kretprobe: lookup failed: %s\n",
+				       kretprobe_blacklist[i].name);
+		}
+	}
+
 	/* By default, kprobes are enabled */
 	kprobe_enabled = true;
 
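Taken together, the per-architecture pieces follow one pattern: an architecture either defines kretprobe_blacklist_size as 0 or exports a populated kretprobe_blacklist[] table together with its size. As a rough sketch (not part of this patch; "my_arch_switch_stack" is a made-up function name), a port that wanted to blacklist one of its own stack-switching helpers would do something like:

/* In the architecture's kprobes.c (hypothetical example) */
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"my_arch_switch_stack", },	/* returns on a different kernel stack */
	{NULL, NULL}			/* Terminator */
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

/* In the architecture's asm/kprobes.h, replacing "#define kretprobe_blacklist_size 0" */
extern const int kretprobe_blacklist_size;

init_kprobes() then resolves the listed names to addresses at boot, and register_kretprobe() rejects any matching address with -EINVAL.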