OpenCloudOS-Kernel/tools/perf/util/thread.c

#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "thread.h"
#include "util.h"
#include "debug.h"
static struct thread *thread__new(pid_t pid)
{
	struct thread *self = calloc(1, sizeof(*self));

	if (self != NULL) {
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
		INIT_LIST_HEAD(&self->maps);
	}

	return self;
}
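
/* Replace the thread's comm with a copy of @comm; returns 0 or -ENOMEM. */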
int thread__set_comm(struct thread *self, const char *comm)
{
if (self->comm)
free(self->comm);
self->comm = strdup(comm);
return self->comm ? 0 : -ENOMEM;
}
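
/* Print a one-line thread header followed by each of its maps; returns bytes written. */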
static size_t thread__fprintf(struct thread *self, FILE *fp)
{
struct map *pos;
size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
list_for_each_entry(pos, &self->maps, node)
ret += map__fprintf(pos, fp);
return ret;
}
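
/*
 * Look up the thread for @pid in the rbtree, creating and inserting a new
 * one if it is not there yet. *last_match acts as a one-entry cache, since
 * PID lookups tend to arrive in runs for the same thread.
 */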
struct thread *
threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
{
	struct rb_node **p = &threads->rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (*last_match && (*last_match)->pid == pid)
		return *last_match;
	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			*last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, threads);
		*last_match = th;
	}

	return th;
}
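
/* Register PID 0 as the "swapper" (idle) thread; exits on failure. */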
struct thread *
register_idle_thread(struct rb_root *threads, struct thread **last_match)
{
	struct thread *thread = threads__findnew(0, threads, last_match);

	if (!thread || thread__set_comm(thread, "swapper")) {
		fprintf(stderr, "problem inserting idle task.\n");
		exit(-1);
	}

	return thread;
}
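
/*
 * Insert @map into the thread's map list, first trimming any existing maps
 * it overlaps and freeing those that end up empty.
 */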
void thread__insert_map(struct thread *self, struct map *map)
{
	struct map *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
		if (map__overlap(pos, map)) {
			if (verbose >= 2) {
				printf("overlapping maps:\n");
				map__fprintf(map, stdout);
				map__fprintf(pos, stdout);
			}

			if (map->start <= pos->start && map->end > pos->start)
				pos->start = map->end;

			if (map->end >= pos->end && map->start < pos->end)
				pos->end = map->start;

			if (verbose >= 2) {
				printf("after collision:\n");
				map__fprintf(pos, stdout);
			}

			if (pos->start >= pos->end) {
				list_del_init(&pos->node);
				free(pos);
			}
		}
	}

	list_add_tail(&map->node, &self->maps);
}
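
/* Inherit the comm and a clone of every map from @parent; returns 0 or -ENOMEM. */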
int thread__fork(struct thread *self, struct thread *parent)
{
	struct map *map;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(parent->comm);
	if (!self->comm)
		return -ENOMEM;

	list_for_each_entry(map, &parent->maps, node) {
		struct map *new = map__clone(map);

		if (!new)
			return -ENOMEM;

		thread__insert_map(self, new);
	}

	return 0;
}
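
/* Return the map whose address range contains @ip, or NULL if none does. */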
struct map *thread__find_map(struct thread *self, u64 ip)
{
	struct map *pos;

	if (self == NULL)
		return NULL;

	list_for_each_entry(pos, &self->maps, node)
		if (ip >= pos->start && ip <= pos->end)
			return pos;

	return NULL;
}
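
/* Print every thread in the rbtree; returns the total bytes written. */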
size_t threads__fprintf(FILE *fp, struct rb_root *threads)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}