tracing: Iterate trace_[ku]probe objects directly
As suggested by Linus [1], use list_for_each_entry() to iterate trace_[ku]probe objects directly, so we can skip the extra container_of() call in these loops.

[1] https://lore.kernel.org/r/CAHk-=wjakjw6-rDzDDBsuMoDCqd+9ogifR_EE1F0K-jYek1CdA@mail.gmail.com

Link: https://lkml.kernel.org/r/20211125202852.406405-1-jolsa@kernel.org

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
commit e161c6bf39
parent ee34c52c71
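For readers unfamiliar with the kernel's intrusive lists, here is a minimal, self-contained userspace sketch of the pattern this commit applies. It is not kernel code: the macros and struct layouts below are simplified stand-ins mirroring <linux/list.h> and the patch, just to show why naming the nested member "tp.list" lets list_for_each_entry() hand back the outer trace_kprobe directly instead of iterating the embedded trace_probe and calling container_of() on each iteration.

/*
 * Standalone sketch (assumes GCC/Clang for typeof); names mirror
 * <linux/list.h> and the patch but this is not kernel code.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Map a pointer to an embedded member back to its containing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Walk a list of structures given the (possibly nested) member name. */
#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Mirrors the nesting in the patch: trace_kprobe embeds a trace_probe,
 * which embeds the list_head linking sibling probes. */
struct trace_probe  { struct list_head list; };
struct trace_kprobe { const char *symbol; struct trace_probe tp; };

int main(void)
{
	struct list_head probes = LIST_HEAD_INIT(probes);
	struct trace_kprobe a = { .symbol = "do_sys_open" };
	struct trace_kprobe b = { .symbol = "vfs_read" };
	struct trace_probe *pos;
	struct trace_kprobe *tk;

	list_add_tail(&a.tp.list, &probes);
	list_add_tail(&b.tp.list, &probes);

	/* Old style: iterate the inner trace_probe, then container_of()
	 * back up to the trace_kprobe on every iteration. */
	list_for_each_entry(pos, &probes, list) {
		tk = container_of(pos, struct trace_kprobe, tp);
		printf("old: %s\n", tk->symbol);
	}

	/* New style from this commit: pass the nested member "tp.list"
	 * so the macro yields the outer trace_kprobe directly. */
	list_for_each_entry(tk, &probes, tp.list)
		printf("new: %s\n", tk->symbol);

	return 0;
}

Both loops visit the same entries; the second form simply folds the container_of() into the iterator by computing the offset of the nested member, which is what the diff below does for each of these loops.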
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -327,11 +327,9 @@ static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
 
 static void __disable_trace_kprobe(struct trace_probe *tp)
 {
-	struct trace_probe *pos;
 	struct trace_kprobe *tk;
 
-	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
-		tk = container_of(pos, struct trace_kprobe, tp);
+	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
 		if (!trace_kprobe_is_registered(tk))
 			continue;
 		if (trace_kprobe_is_return(tk))
@@ -348,7 +346,7 @@ static void __disable_trace_kprobe(struct trace_probe *tp)
 static int enable_trace_kprobe(struct trace_event_call *call,
 				struct trace_event_file *file)
 {
-	struct trace_probe *pos, *tp;
+	struct trace_probe *tp;
 	struct trace_kprobe *tk;
 	bool enabled;
 	int ret = 0;
@@ -369,8 +367,7 @@ static int enable_trace_kprobe(struct trace_event_call *call,
 	if (enabled)
 		return 0;
 
-	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
-		tk = container_of(pos, struct trace_kprobe, tp);
+	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
 		if (trace_kprobe_has_gone(tk))
 			continue;
 		ret = __enable_trace_kprobe(tk);
@@ -559,11 +556,9 @@ static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
 					 struct trace_kprobe *comp)
 {
 	struct trace_probe_event *tpe = orig->tp.event;
-	struct trace_probe *pos;
 	int i;
 
-	list_for_each_entry(pos, &tpe->probes, list) {
-		orig = container_of(pos, struct trace_kprobe, tp);
+	list_for_each_entry(orig, &tpe->probes, tp.list) {
 		if (strcmp(trace_kprobe_symbol(orig),
 			   trace_kprobe_symbol(comp)) ||
 		    trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -409,12 +409,10 @@ static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
 					 struct trace_uprobe *comp)
 {
 	struct trace_probe_event *tpe = orig->tp.event;
-	struct trace_probe *pos;
 	struct inode *comp_inode = d_real_inode(comp->path.dentry);
 	int i;
 
-	list_for_each_entry(pos, &tpe->probes, list) {
-		orig = container_of(pos, struct trace_uprobe, tp);
+	list_for_each_entry(orig, &tpe->probes, tp.list) {
 		if (comp_inode != d_real_inode(orig->path.dentry) ||
 		    comp->offset != orig->offset)
 			continue;
@@ -1072,14 +1070,12 @@ static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
 
 static void __probe_event_disable(struct trace_probe *tp)
 {
-	struct trace_probe *pos;
 	struct trace_uprobe *tu;
 
 	tu = container_of(tp, struct trace_uprobe, tp);
 	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
 
-	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
-		tu = container_of(pos, struct trace_uprobe, tp);
+	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
 		if (!tu->inode)
 			continue;
 
@@ -1091,7 +1087,7 @@ static void __probe_event_disable(struct trace_probe *tp)
 static int probe_event_enable(struct trace_event_call *call,
 			struct trace_event_file *file, filter_func_t filter)
 {
-	struct trace_probe *pos, *tp;
+	struct trace_probe *tp;
 	struct trace_uprobe *tu;
 	bool enabled;
 	int ret;
@@ -1126,8 +1122,7 @@ static int probe_event_enable(struct trace_event_call *call,
 	if (ret)
 		goto err_flags;
 
-	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
-		tu = container_of(pos, struct trace_uprobe, tp);
+	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
 		ret = trace_uprobe_enable(tu, filter);
 		if (ret) {
 			__probe_event_disable(tp);
@@ -1272,7 +1267,7 @@ static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
 static int uprobe_perf_close(struct trace_event_call *call,
 			     struct perf_event *event)
 {
-	struct trace_probe *pos, *tp;
+	struct trace_probe *tp;
 	struct trace_uprobe *tu;
 	int ret = 0;
 
@@ -1284,8 +1279,7 @@ static int uprobe_perf_close(struct trace_event_call *call,
 	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
 		return 0;
 
-	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
-		tu = container_of(pos, struct trace_uprobe, tp);
+	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
 		ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
 		if (ret)
 			break;
@@ -1297,7 +1291,7 @@ static int uprobe_perf_close(struct trace_event_call *call,
 static int uprobe_perf_open(struct trace_event_call *call,
 			    struct perf_event *event)
 {
-	struct trace_probe *pos, *tp;
+	struct trace_probe *tp;
 	struct trace_uprobe *tu;
 	int err = 0;
 
@@ -1309,8 +1303,7 @@ static int uprobe_perf_open(struct trace_event_call *call,
 	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
 		return 0;
 
-	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
-		tu = container_of(pos, struct trace_uprobe, tp);
+	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
 		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
 		if (err) {
 			uprobe_perf_close(call, event);