kprobes: codingstyle cleanups

Remove superfluous braces and fix indentation as well as comments.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Christoph Hellwig, 2007-05-08 00:34:13 -07:00 (committed by Linus Torvalds)
parent b0bb501651
commit 6f716acd5f
1 changed file with 30 additions and 25 deletions
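The cleanups in this commit follow the usual kernel coding-style rules: braces are dropped around single-statement bodies, assignments are pulled out of if () conditions, and multi-line conditions are re-indented for alignment. Below is a minimal, self-contained sketch of that before/after pattern; it is hypothetical illustration code (the prepare() helper and main() are invented for this sketch, nothing here is taken from kprobes.c):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper, used only to illustrate the style cleanup. */
static int prepare(int *slot)
{
	if (!slot)
		return -1;
	*slot = 42;
	return 0;
}

int main(void)
{
	int *slot;
	int ret;

	slot = malloc(sizeof(*slot));
	if (!slot)		/* was: if (!slot) { return 1; } */
		return 1;

	ret = prepare(slot);	/* was: if ((ret = prepare(slot)) != 0) */
	if (ret)
		goto out;

	printf("slot = %d\n", *slot);
out:
	free(slot);
	return ret ? EXIT_FAILURE : EXIT_SUCCESS;
}

The same transformations appear in the kmalloc(), arch_prepare_kprobe() and module_text_address() hunks of the diff that follows.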


@@ -133,7 +133,7 @@ kprobe_opcode_t __kprobes *get_insn_slot(void)
 	struct kprobe_insn_page *kip;
 	struct hlist_node *pos;
 
-      retry:
+ retry:
 	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
 		if (kip->nused < INSNS_PER_PAGE) {
 			int i;
@@ -155,9 +155,8 @@ kprobe_opcode_t __kprobes *get_insn_slot(void)
 	}
 	/* All out of space. Need to allocate a new page. Use slot 0. */
 	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
-	if (!kip) {
+	if (!kip)
 		return NULL;
-	}
 
 	/*
 	 * Use module_alloc so this page is within +/- 2GB of where the
@@ -246,9 +245,9 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
 			break;
 		}
 	}
-	if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE)) {
+
+	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
 		collect_garbage_slots();
-	}
 }
 #endif
@@ -314,7 +313,6 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 			reset_kprobe_instance();
 		}
 	}
-	return;
 }
 
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
@@ -533,8 +531,8 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 
 static int __kprobes in_kprobes_functions(unsigned long addr)
 {
-	if (addr >= (unsigned long)__kprobes_text_start
-		&& addr < (unsigned long)__kprobes_text_end)
+	if (addr >= (unsigned long)__kprobes_text_start &&
+	    addr < (unsigned long)__kprobes_text_end)
 		return -EINVAL;
 	return 0;
 }
@@ -561,19 +559,24 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 		return -EINVAL;
 	p->addr = (kprobe_opcode_t *)(((char *)p->addr)+ p->offset);
 
-	if ((!kernel_text_address((unsigned long) p->addr)) ||
-		in_kprobes_functions((unsigned long) p->addr))
+	if (!kernel_text_address((unsigned long) p->addr) ||
+	    in_kprobes_functions((unsigned long) p->addr))
 		return -EINVAL;
 
 	p->mod_refcounted = 0;
-	/* Check are we probing a module */
-	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
+
+	/*
+	 * Check if are we probing a module.
+	 */
+	probed_mod = module_text_address((unsigned long) p->addr);
+	if (probed_mod) {
 		struct module *calling_mod = module_text_address(called_from);
-		/* We must allow modules to probe themself and
-		 * in this case avoid incrementing the module refcount,
-		 * so as to allow unloading of self probing modules.
+		/*
+		 * We must allow modules to probe themself and in this case
+		 * avoid incrementing the module refcount, so as to allow
+		 * unloading of self probing modules.
 		 */
-		if (calling_mod && (calling_mod != probed_mod)) {
+		if (calling_mod && calling_mod != probed_mod) {
 			if (unlikely(!try_module_get(probed_mod)))
 				return -EINVAL;
 			p->mod_refcounted = 1;
@@ -591,7 +594,8 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 		goto out;
 	}
 
-	if ((ret = arch_prepare_kprobe(p)) != 0)
+	ret = arch_prepare_kprobe(p);
+	if (ret)
 		goto out;
 
 	INIT_HLIST_NODE(&p->hlist);
@@ -614,8 +618,7 @@ out:
 
 int __kprobes register_kprobe(struct kprobe *p)
 {
-	return __register_kprobe(p,
-		(unsigned long)__builtin_return_address(0));
+	return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
 }
 
 void __kprobes unregister_kprobe(struct kprobe *p)
@@ -639,9 +642,9 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 		return;
 	}
 valid_p:
-	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
-		(p->list.next == &old_p->list) &&
-		(p->list.prev == &old_p->list))) {
+	if (old_p == p ||
+	    (old_p->pre_handler == aggr_pre_handler &&
+	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
 		/* Only probe on the hash list */
 		arch_disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
@@ -654,9 +657,11 @@ valid_p:
 	mutex_unlock(&kprobe_mutex);
 
 	synchronize_sched();
-	if (p->mod_refcounted &&
-	    (mod = module_text_address((unsigned long)p->addr)))
-		module_put(mod);
+	if (p->mod_refcounted) {
+		mod = module_text_address((unsigned long)p->addr);
+		if (mod)
+			module_put(mod);
+	}
 	if (cleanup_p) {
 		if (p != old_p) {