2016-02-18 11:58:59 +08:00
|
|
|
/* Copyright (c) 2016 Facebook
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of version 2 of the GNU General Public
|
|
|
|
* License as published by the Free Software Foundation.
|
|
|
|
*/
|
2023-08-18 17:01:12 +08:00
|
|
|
#include "vmlinux.h"
|
2016-02-18 11:58:59 +08:00
|
|
|
#include <linux/version.h>
|
2020-05-14 02:02:23 +08:00
|
|
|
#include <bpf/bpf_helpers.h>
|
|
|
|
#include <bpf/bpf_tracing.h>
|
2023-08-18 17:01:15 +08:00
|
|
|
#include <bpf/bpf_core_read.h>
|
2016-02-18 11:58:59 +08:00
|
|
|
|
2023-08-18 17:01:12 +08:00
|
|
|
/* Fallback when the kernel headers in use don't export this limit. */
#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif

/* Minimum off-CPU interval (in microseconds) worth recording;
 * shorter sleeps are discarded in oncpu().
 */
#define MINBLOCK_US 1
/* Capacity used for every map defined below. */
#define MAX_ENTRIES 10000
|
2016-02-18 11:58:59 +08:00
|
|
|
|
|
|
|
/*
 * Aggregation key for the counts map: identifies a (waker, sleeper)
 * pair by command name plus the stack ids of both sides, as returned
 * by bpf_get_stackid().
 */
struct key_t {
	char waker[TASK_COMM_LEN];	/* comm of the task that did the wakeup (zeroed if unknown) */
	char target[TASK_COMM_LEN];	/* comm of the task that was blocked */
	u32 wret;			/* waker's stack id, 0 when no waker was recorded */
	u32 tret;			/* target's (sleeper's) stack id */
};
|
|
|
|
|
2020-08-23 16:53:34 +08:00
|
|
|
/* key_t -> accumulated blocked time in ns, updated by update_counts(). */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, struct key_t);
	__type(value, u64);
	__uint(max_entries, MAX_ENTRIES);
} counts SEC(".maps");
|
2016-02-18 11:58:59 +08:00
|
|
|
|
2020-08-23 16:53:34 +08:00
|
|
|
/* pid -> timestamp (ns) written in oncpu() when a thread goes off-CPU. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, u64);
	__uint(max_entries, MAX_ENTRIES);
} start SEC(".maps");
|
2016-02-18 11:58:59 +08:00
|
|
|
|
|
|
|
/* Identity of the task that issued a wakeup, captured in waker(). */
struct wokeby_t {
	char name[TASK_COMM_LEN];	/* waker's comm */
	u32 ret;			/* waker's stack id from bpf_get_stackid() */
};
|
|
|
|
|
2020-08-23 16:53:34 +08:00
|
|
|
/* woken pid -> wokeby_t; written by waker(), consumed (and deleted)
 * by update_counts().
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, struct wokeby_t);
	__uint(max_entries, MAX_ENTRIES);
} wokeby SEC(".maps");
|
2016-02-18 11:58:59 +08:00
|
|
|
|
2020-08-23 16:53:34 +08:00
|
|
|
/* Stack trace storage; ids referenced by key_t.wret/tret come from here. */
struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(key_size, sizeof(u32));
	__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
	__uint(max_entries, MAX_ENTRIES);
} stackmap SEC(".maps");
|
2016-02-18 11:58:59 +08:00
|
|
|
|
|
|
|
/* Reuse an existing stackmap slot when the stack hash matches. */
#define STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
|
|
|
|
|
|
|
|
/*
 * Fires on try_to_wake_up(): remember who (comm + kernel stack id) is
 * waking the target task, keyed by the target's pid, so that
 * update_counts() can later attribute the finished sleep to this waker.
 * Always returns 0.
 */
SEC("kprobe/try_to_wake_up")
int waker(struct pt_regs *ctx)
{
	/* First kprobe argument is the task being woken up. */
	struct task_struct *task = (void *)PT_REGS_PARM1_CORE(ctx);
	struct wokeby_t entry;
	u32 target_pid;

	target_pid = BPF_CORE_READ(task, pid);
	entry.ret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);
	bpf_get_current_comm(&entry.name, sizeof(entry.name));

	/* BPF_ANY: a newer wakeup simply replaces an unconsumed one. */
	bpf_map_update_elem(&wokeby, &target_pid, &entry, BPF_ANY);
	return 0;
}
|
|
|
|
|
2016-04-07 09:43:30 +08:00
|
|
|
/*
 * Fold one off-CPU interval (delta, already in microseconds) of the
 * current task into the counts map. The key combines the sleeper's
 * comm/stack with the waker info stashed in the wokeby map by waker(),
 * if any; the wokeby entry is consumed. Always returns 0.
 */
static inline int update_counts(void *ctx, u32 pid, u64 delta)
{
	struct wokeby_t *entry;
	struct key_t key;
	u64 init = 0, *total;

	/* Start from a "no waker known" key; comm padding must be zeroed
	 * so equal keys hash identically.
	 */
	__builtin_memset(&key.waker, 0, sizeof(key.waker));
	key.wret = 0;
	key.tret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);
	bpf_get_current_comm(&key.target, sizeof(key.target));

	/* If waker() saw this pid being woken, pull in its identity. */
	entry = bpf_map_lookup_elem(&wokeby, &pid);
	if (entry) {
		__builtin_memcpy(&key.waker, entry->name, sizeof(key.waker));
		key.wret = entry->ret;
		bpf_map_delete_elem(&wokeby, &pid);
	}

	total = bpf_map_lookup_elem(&counts, &key);
	if (!total) {
		/* First sighting of this key: seed a zero, then re-lookup.
		 * BPF_NOEXIST keeps a concurrent inserter's value intact;
		 * the re-lookup can still fail if the map is full.
		 */
		bpf_map_update_elem(&counts, &key, &init, BPF_NOEXIST);
		total = bpf_map_lookup_elem(&counts, &key);
		if (!total)
			return 0;
	}
	*total += delta;
	return 0;
}
|
|
|
|
|
2016-04-07 09:43:30 +08:00
|
|
|
/*
 * Context-switch probe. Two alternative attach points are provided and
 * selected at build time with the #if below: the sched_switch
 * tracepoint (default) or a multi-kprobe on finish_task_switch. Both
 * variants (1) timestamp the outgoing thread's sleep start in the
 * 'start' map and (2) close out the incoming (current) thread's sleep
 * interval, crediting it via update_counts().
 */
#if 1
/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */
SEC("tracepoint/sched/sched_switch")
int oncpu(struct trace_event_raw_sched_switch *ctx)
{
	/* record previous thread sleep time */
	u32 pid = ctx->prev_pid;
#else
SEC("kprobe.multi/finish_task_switch*")
int oncpu(struct pt_regs *ctx)
{
	/* First argument of finish_task_switch() is the previous task. */
	struct task_struct *p = (void *)PT_REGS_PARM1_CORE(ctx);
	/* record previous thread sleep time */
	u32 pid = BPF_CORE_READ(p, pid);
#endif
	u64 delta, ts, *tsp;

	ts = bpf_ktime_get_ns();
	bpf_map_update_elem(&start, &pid, &ts, BPF_ANY);

	/* calculate current thread's delta time */
	/* low 32 bits of pid_tgid are the thread id (pid) */
	pid = bpf_get_current_pid_tgid();
	tsp = bpf_map_lookup_elem(&start, &pid);
	if (!tsp)
		/* missed start or filtered */
		return 0;

	delta = bpf_ktime_get_ns() - *tsp;
	bpf_map_delete_elem(&start, &pid);
	/* ns -> us before comparing against the MINBLOCK_US threshold */
	delta = delta / 1000;
	if (delta < MINBLOCK_US)
		return 0;

	return update_counts(ctx, pid, delta);
}
|
|
|
|
/* GPL license is required to use GPL-only BPF helpers. */
char _license[] SEC("license") = "GPL";
/* Kernel version stamp, checked by older loaders for kprobe programs. */
u32 _version SEC("version") = LINUX_VERSION_CODE;
|