tracing: make tracer_init_tracefs initcall asynchronous
Move trace_eval_init() to subsys_initcall so that it starts earlier. To keep tracer_init_tracefs() from blocking on trace_event_sem, which trace_eval_init() holds [1], queue tracer_init_tracefs() to eval_map_wq so that the two works are executed sequentially. Making tracer_init_tracefs() asynchronous speeds up kernel initialization: on my arm64 platform it shaves ~20ms off the 125ms that do_initcalls() takes in total.

Link: https://lkml.kernel.org/r/20220426122407.17042-3-mark-pk.tsai@mediatek.com
[1]: https://lore.kernel.org/r/68d7b3327052757d0cd6359a6c9015a85b437232.camel@pengutronix.de

Signed-off-by: Mark-PK Tsai <mark-pk.tsai@mediatek.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
commit 6621a70046
parent ef9188bcc6
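For reference, below is a minimal sketch (not the kernel/trace/trace.c code from this patch) of the serialization pattern the commit relies on: two __init work items queued on one workqueue, so the second does not start until the first finishes, while the initcalls themselves return immediately. The names demo_init_wq, early_work_func and late_work_func, and the use of alloc_ordered_workqueue() to make the one-at-a-time guarantee explicit, are illustrative assumptions; the patch itself reuses eval_map_wq and falls back to calling the work function directly when no workqueue is available.

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_init_wq __initdata;
static struct work_struct early_work __initdata;	/* e.g. the eval map updates */
static struct work_struct late_work __initdata;		/* e.g. the tracefs setup   */

/* Slow setup that used to run synchronously inside an initcall. */
static void __init early_work_func(struct work_struct *work)
{
}

/* Runs only after early_work_func() has finished on the same ordered wq. */
static void __init late_work_func(struct work_struct *work)
{
}

static int __init demo_early_init(void)
{
	demo_init_wq = alloc_ordered_workqueue("demo_init_wq", 0);
	if (!demo_init_wq) {
		early_work_func(NULL);	/* no wq: fall back to synchronous setup */
		return 0;
	}

	INIT_WORK(&early_work, early_work_func);
	queue_work(demo_init_wq, &early_work);
	return 0;
}
subsys_initcall(demo_early_init);

static int __init demo_late_init(void)
{
	if (demo_init_wq) {
		INIT_WORK(&late_work, late_work_func);
		queue_work(demo_init_wq, &late_work);
	} else {
		late_work_func(NULL);	/* wq allocation failed earlier */
	}
	return 0;
}
fs_initcall(demo_late_init);

/*
 * In real code a late initcall must flush or destroy the workqueue before
 * init memory is freed; the tracing code does this in trace_eval_sync().
 * Omitted here for brevity.
 */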
kernel/trace/trace.c

@@ -9615,6 +9615,7 @@ extern struct trace_eval_map *__stop_ftrace_eval_maps[];
 
 static struct workqueue_struct *eval_map_wq __initdata;
 static struct work_struct eval_map_work __initdata;
+static struct work_struct tracerfs_init_work __initdata;
 
 static void __init eval_map_work_func(struct work_struct *work)
 {
@@ -9640,6 +9641,8 @@ static int __init trace_eval_init(void)
 	return 0;
 }
 
+subsys_initcall(trace_eval_init);
+
 static int __init trace_eval_sync(void)
 {
 	/* Make sure the eval map updates are finished */
@@ -9722,15 +9725,8 @@ static struct notifier_block trace_module_nb = {
 };
 #endif /* CONFIG_MODULES */
 
-static __init int tracer_init_tracefs(void)
+static __init void tracer_init_tracefs_work_func(struct work_struct *work)
 {
-	int ret;
-
-	trace_access_lock_init();
-
-	ret = tracing_init_dentry();
-	if (ret)
-		return 0;
 
 	event_trace_init();
 
@@ -9752,8 +9748,6 @@ static __init int tracer_init_tracefs(void)
 	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
 			NULL, &tracing_saved_tgids_fops);
 
-	trace_eval_init();
-
 	trace_create_eval_file(NULL);
 
 #ifdef CONFIG_MODULES
@@ -9768,6 +9762,24 @@ static __init int tracer_init_tracefs(void)
 	create_trace_instances(NULL);
 
 	update_tracer_options(&global_trace);
+}
+
+static __init int tracer_init_tracefs(void)
+{
+	int ret;
+
+	trace_access_lock_init();
+
+	ret = tracing_init_dentry();
+	if (ret)
+		return 0;
+
+	if (eval_map_wq) {
+		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
+		queue_work(eval_map_wq, &tracerfs_init_work);
+	} else {
+		tracer_init_tracefs_work_func(NULL);
+	}
 
 	return 0;
 }