kprobes: Remove redundant text_mutex lock in optimize
Remove text_mutex locking from optimize_all_kprobes(): this function
does not modify kernel text itself, it only queues probes on the
optimizing list for the kprobe_optimizer worker thread, which takes
text_mutex when it actually patches the code.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101025131801.19160.70939.stgit@ltc236.sdl.hitachi.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 43948f5027
parent 2c78ffeca9
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -74,7 +74,8 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 /* NOTE: change this value only with kprobe_mutex held */
 static bool kprobes_all_disarmed;
 
-static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
+/* This protects kprobe_table and optimizing_list */
+static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
 	spinlock_t lock ____cacheline_aligned_in_smp;
@@ -595,6 +596,7 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 }
 
 #ifdef CONFIG_SYSCTL
+/* This should be called with kprobe_mutex locked */
 static void __kprobes optimize_all_kprobes(void)
 {
 	struct hlist_head *head;
@@ -607,17 +609,16 @@ static void __kprobes optimize_all_kprobes(void)
 		return;
 
 	kprobes_allow_optimization = true;
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
 			if (!kprobe_disabled(p))
 				optimize_kprobe(p);
 	}
-	mutex_unlock(&text_mutex);
 	printk(KERN_INFO "Kprobes globally optimized\n");
 }
 
+/* This should be called with kprobe_mutex locked */
 static void __kprobes unoptimize_all_kprobes(void)
 {
 	struct hlist_head *head;
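
The safety argument here is that enqueuing work and performing it need
different locks: optimize_all_kprobes() only appends probes to a list
protected by kprobe_mutex, while the kprobe_optimizer worker takes
text_mutex when it later patches instructions. Below is a minimal
userspace C sketch of that split, not kernel code; the names
(pending_list, queue_mutex, patch_mutex, optimizer_worker) are
illustrative analogues, and pthreads stands in for the kernel's locking
and workqueue machinery.

/*
 * Sketch of the "enqueue under one lock, patch under another" pattern.
 * queue_mutex plays the role of kprobe_mutex, patch_mutex the role of
 * text_mutex.  All names here are hypothetical, not from the kernel.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct probe {
	int		id;
	struct probe	*next;
};

static struct probe *pending_list;	/* analogue of optimizing_list */
static pthread_mutex_t queue_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t patch_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Enqueue only: never takes patch_mutex, like optimize_all_kprobes(). */
static void queue_for_optimization(int id)
{
	struct probe *p = malloc(sizeof(*p));

	p->id = id;
	pthread_mutex_lock(&queue_mutex);
	p->next = pending_list;
	pending_list = p;
	pthread_mutex_unlock(&queue_mutex);
}

/* Worker: detaches the queued list, then "patches" under patch_mutex. */
static void *optimizer_worker(void *arg)
{
	struct probe *p, *next;

	pthread_mutex_lock(&queue_mutex);
	p = pending_list;
	pending_list = NULL;
	pthread_mutex_unlock(&queue_mutex);

	pthread_mutex_lock(&patch_mutex);
	for (; p; p = next) {
		next = p->next;
		printf("patching probe %d\n", p->id);	/* stand-in for code patching */
		free(p);
	}
	pthread_mutex_unlock(&patch_mutex);
	return NULL;
}

int main(void)
{
	pthread_t worker;
	int i;

	for (i = 0; i < 4; i++)
		queue_for_optimization(i);

	pthread_create(&worker, NULL, optimizer_worker, NULL);
	pthread_join(worker, NULL);
	return 0;
}

Build with "cc -pthread sketch.c". The enqueue path runs without ever
touching patch_mutex, which is exactly the property this commit restores
for optimize_all_kprobes().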