2009-08-14 18:21:53 +08:00
|
|
|
#include "../perf.h"
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <string.h>
|
2009-12-14 05:50:28 +08:00
|
|
|
#include "session.h"
|
2009-08-14 18:21:53 +08:00
|
|
|
#include "thread.h"
|
|
|
|
#include "util.h"
|
2009-08-18 23:04:03 +08:00
|
|
|
#include "debug.h"
|
2009-08-14 18:21:53 +08:00
|
|
|
|
2010-03-18 22:36:05 +08:00
|
|
|
/* scandir() filter: keep only numeric entries, i.e. real tids.
 * Without this, "." and ".." pass through and atoi() turns them
 * into bogus tid 0 entries. */
static int find_all_tid__filter(const struct dirent *dirent)
{
	return dirent->d_name[0] >= '0' && dirent->d_name[0] <= '9';
}

/*
 * find_all_tid - list every thread id belonging to a process.
 * @pid:     process whose /proc/<pid>/task directory is scanned
 * @all_tid: out parameter; on success points to a malloc'ed array of
 *           pid_t that the caller must free()
 *
 * Returns the number of tids found (> 0) on success, -ENOENT when the
 * task directory cannot be read (or holds no tids) and -ENOMEM on
 * allocation failure.
 */
int find_all_tid(int pid, pid_t ** all_tid)
{
	char name[256];
	int items;
	struct dirent **namelist = NULL;
	int ret = 0;
	int i;

	/* snprintf instead of sprintf: bounded even for pathological pids */
	snprintf(name, sizeof(name), "/proc/%d/task", pid);
	items = scandir(name, &namelist, find_all_tid__filter, NULL);
	if (items <= 0)
		return -ENOENT;
	*all_tid = malloc(sizeof(pid_t) * items);
	if (!*all_tid) {
		ret = -ENOMEM;
		goto failure;
	}

	for (i = 0; i < items; i++)
		(*all_tid)[i] = atoi(namelist[i]->d_name);

	ret = items;

failure:
	/* scandir() allocates each entry and the array itself */
	for (i = 0; i < items; i++)
		free(namelist[i]);
	free(namelist);

	return ret;
}
|
|
|
|
|
2009-12-12 00:50:36 +08:00
|
|
|
void map_groups__init(struct map_groups *self)
|
2009-11-28 02:29:20 +08:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < MAP__NR_TYPES; ++i) {
|
|
|
|
self->maps[i] = RB_ROOT;
|
|
|
|
INIT_LIST_HEAD(&self->removed_maps[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-10-09 03:04:17 +08:00
|
|
|
/*
 * Allocate and initialise a new thread object for @pid.  The comm
 * defaults to ":<pid>" until a real one is set.  Returns NULL on
 * allocation failure.
 */
static struct thread *thread__new(pid_t pid)
{
	struct thread *self = zalloc(sizeof(*self));

	if (self == NULL)
		return NULL;

	map_groups__init(&self->mg);
	self->pid = pid;
	self->comm = malloc(32);
	if (self->comm != NULL)
		snprintf(self->comm, 32, ":%d", self->pid);

	return self;
}
|
|
|
|
|
2010-02-26 23:08:34 +08:00
|
|
|
static void map_groups__flush(struct map_groups *self)
|
|
|
|
{
|
|
|
|
int type;
|
|
|
|
|
|
|
|
for (type = 0; type < MAP__NR_TYPES; type++) {
|
|
|
|
struct rb_root *root = &self->maps[type];
|
|
|
|
struct rb_node *next = rb_first(root);
|
|
|
|
|
|
|
|
while (next) {
|
|
|
|
struct map *pos = rb_entry(next, struct map, rb_node);
|
|
|
|
next = rb_next(&pos->rb_node);
|
|
|
|
rb_erase(&pos->rb_node, root);
|
|
|
|
/*
|
|
|
|
* We may have references to this map, for
|
|
|
|
* instance in some hist_entry instances, so
|
|
|
|
* just move them to a separate list.
|
|
|
|
*/
|
|
|
|
list_add_tail(&pos->node, &self->removed_maps[pos->type]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-08-14 18:21:53 +08:00
|
|
|
int thread__set_comm(struct thread *self, const char *comm)
|
|
|
|
{
|
2010-02-26 23:08:34 +08:00
|
|
|
int err;
|
|
|
|
|
2009-08-14 18:21:53 +08:00
|
|
|
if (self->comm)
|
|
|
|
free(self->comm);
|
|
|
|
self->comm = strdup(comm);
|
2010-02-26 23:08:34 +08:00
|
|
|
err = self->comm == NULL ? -ENOMEM : 0;
|
|
|
|
if (!err) {
|
|
|
|
self->comm_set = true;
|
|
|
|
map_groups__flush(&self->mg);
|
|
|
|
}
|
|
|
|
return err;
|
2009-08-14 18:21:53 +08:00
|
|
|
}
|
|
|
|
|
perf tools: Bind callchains to the first sort dimension column
Currently, the callchains are displayed using a constant left
margin. So depending on the current sort dimension
configuration, callchains may appear to be well attached to the
first sort dimension column field which is mostly the case,
except when the first dimension of sorting is done by comm,
because these are right aligned.
This patch binds the callchain to the first letter in the first
column, whatever type of column it is (dso, comm, symbol).
Before:
0.80% perf [k] __lock_acquire
__lock_acquire
lock_acquire
|
|--58.33%-- _spin_lock
| |
| |--28.57%-- inotify_should_send_event
| | fsnotify
| | __fsnotify_parent
After:
0.80% perf [k] __lock_acquire
__lock_acquire
lock_acquire
|
|--58.33%-- _spin_lock
| |
| |--28.57%-- inotify_should_send_event
| | fsnotify
| | __fsnotify_parent
Also, for clarity, we no longer display the callchain as-is; instead:
- If we have a top level ancestor in the callchain, start it
with a first ascii hook.
Before:
0.80% perf [kernel] [k] __lock_acquire
__lock_acquire
lock_acquire
|
|--58.33%-- _spin_lock
| |
| |--28.57%-- inotify_should_send_event
| | fsnotify
[..] [..]
After:
0.80% perf [kernel] [k] __lock_acquire
|
--- __lock_acquire
lock_acquire
|
|--58.33%-- _spin_lock
| |
| |--28.57%-- inotify_should_send_event
| | fsnotify
[..] [..]
- Otherwise, if we have several top level ancestors, then
display these like we did before:
1.69% Xorg
|
|--21.21%-- vread_hpet
| 0x7fffd85b46fc
| 0x7fffd85b494d
| 0x7f4fafb4e54d
|
|--15.15%-- exaOffscreenAlloc
|
|--9.09%-- I830WaitLpRing
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Anton Blanchard <anton@samba.org>
LKML-Reference: <1256246604-17156-2-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-10-23 05:23:23 +08:00
|
|
|
int thread__comm_len(struct thread *self)
|
|
|
|
{
|
|
|
|
if (!self->comm_len) {
|
|
|
|
if (!self->comm)
|
|
|
|
return 0;
|
|
|
|
self->comm_len = strlen(self->comm);
|
|
|
|
}
|
|
|
|
|
|
|
|
return self->comm_len;
|
|
|
|
}
|
|
|
|
|
2010-03-10 02:58:17 +08:00
|
|
|
size_t __map_groups__fprintf_maps(struct map_groups *self,
|
|
|
|
enum map_type type, FILE *fp)
|
2009-08-14 18:21:53 +08:00
|
|
|
{
|
2009-11-28 02:29:20 +08:00
|
|
|
size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
|
2009-09-29 01:48:46 +08:00
|
|
|
struct rb_node *nd;
|
2009-08-14 18:21:53 +08:00
|
|
|
|
2009-11-28 02:29:20 +08:00
|
|
|
for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
|
|
|
|
struct map *pos = rb_entry(nd, struct map, rb_node);
|
|
|
|
printed += fprintf(fp, "Map:");
|
|
|
|
printed += map__fprintf(pos, fp);
|
2010-03-10 02:58:17 +08:00
|
|
|
if (verbose > 2) {
|
2009-11-28 02:29:20 +08:00
|
|
|
printed += dso__fprintf(pos->dso, type, fp);
|
|
|
|
printed += fprintf(fp, "--\n");
|
|
|
|
}
|
2009-09-29 01:48:46 +08:00
|
|
|
}
|
2009-08-14 18:21:53 +08:00
|
|
|
|
2009-11-28 02:29:20 +08:00
|
|
|
return printed;
|
|
|
|
}
|
|
|
|
|
2009-12-12 00:50:36 +08:00
|
|
|
size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp)
|
2009-11-28 02:29:20 +08:00
|
|
|
{
|
|
|
|
size_t printed = 0, i;
|
|
|
|
for (i = 0; i < MAP__NR_TYPES; ++i)
|
2009-12-12 00:50:36 +08:00
|
|
|
printed += __map_groups__fprintf_maps(self, i, fp);
|
2009-11-28 02:29:20 +08:00
|
|
|
return printed;
|
|
|
|
}
|
2009-10-02 14:29:58 +08:00
|
|
|
|
2009-12-12 00:50:36 +08:00
|
|
|
static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
|
|
|
|
enum map_type type, FILE *fp)
|
2009-11-28 02:29:20 +08:00
|
|
|
{
|
|
|
|
struct map *pos;
|
|
|
|
size_t printed = 0;
|
|
|
|
|
|
|
|
list_for_each_entry(pos, &self->removed_maps[type], node) {
|
|
|
|
printed += fprintf(fp, "Map:");
|
|
|
|
printed += map__fprintf(pos, fp);
|
|
|
|
if (verbose > 1) {
|
|
|
|
printed += dso__fprintf(pos->dso, type, fp);
|
|
|
|
printed += fprintf(fp, "--\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return printed;
|
|
|
|
}
|
2009-10-02 14:29:58 +08:00
|
|
|
|
2009-12-12 00:50:36 +08:00
|
|
|
static size_t map_groups__fprintf_removed_maps(struct map_groups *self, FILE *fp)
|
2009-11-28 02:29:20 +08:00
|
|
|
{
|
|
|
|
size_t printed = 0, i;
|
|
|
|
for (i = 0; i < MAP__NR_TYPES; ++i)
|
2009-12-12 00:50:36 +08:00
|
|
|
printed += __map_groups__fprintf_removed_maps(self, i, fp);
|
2009-11-28 02:29:20 +08:00
|
|
|
return printed;
|
|
|
|
}
|
|
|
|
|
2009-12-12 00:50:36 +08:00
|
|
|
/* Print all live maps followed by the removed ones; returns characters written. */
static size_t map_groups__fprintf(struct map_groups *self, FILE *fp)
{
	size_t printed = map_groups__fprintf_maps(self, fp);

	printed += fprintf(fp, "Removed maps:\n");
	printed += map_groups__fprintf_removed_maps(self, fp);
	return printed;
}
|
|
|
|
|
|
|
|
static size_t thread__fprintf(struct thread *self, FILE *fp)
|
|
|
|
{
|
|
|
|
return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
|
|
|
|
map_groups__fprintf(&self->mg, fp);
|
2009-08-14 18:21:53 +08:00
|
|
|
}
|
|
|
|
|
2009-12-14 05:50:28 +08:00
|
|
|
/*
 * Look up the thread with @pid in the session's rb-tree, creating and
 * inserting a new one if it is not there yet.  Returns NULL only when
 * thread__new() fails to allocate.
 */
struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
	struct rb_node **p = &self->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (self->last_match && self->last_match->pid == pid)
		return self->last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			self->last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	/* not found: create it and link it where the search fell off */
	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &self->threads);
		self->last_match = th;	/* prime the cache for the next lookup */
	}

	return th;
}
|
|
|
|
|
2010-03-05 22:54:02 +08:00
|
|
|
/*
 * Remove or trim every existing map that overlaps the incoming @map.
 * Overlapped maps are moved to the removed_maps list (other objects may
 * still reference them); for partial overlaps, "before"/"after" clones
 * are inserted to keep the non-overlapped address ranges.  Returns 0 on
 * success, -ENOMEM if a clone cannot be allocated.
 */
static int map_groups__fixup_overlappings(struct map_groups *self,
					  struct map *map)
{
	struct rb_root *root = &self->maps[map->type];
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		/* advance before any rb_erase() invalidates pos's node */
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", stderr);
			map__fprintf(map, stderr);
			map__fprintf(pos, stderr);
		}

		rb_erase(&pos->rb_node, root);
		/*
		 * We may have references to this map, for instance in some
		 * hist_entry instances, so just move them to a separate
		 * list.
		 */
		list_add_tail(&pos->node, &self->removed_maps[map->type]);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			/* old map starts below the new one: keep the head */
			struct map *before = map__clone(pos);

			if (before == NULL)
				return -ENOMEM;

			before->end = map->start - 1;
			map_groups__insert(self, before);
			if (verbose >= 2)
				map__fprintf(before, stderr);
		}

		if (map->end < pos->end) {
			/* old map ends past the new one: keep the tail */
			struct map *after = map__clone(pos);

			if (after == NULL)
				return -ENOMEM;

			after->start = map->end + 1;
			map_groups__insert(self, after);
			if (verbose >= 2)
				map__fprintf(after, stderr);
		}
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Add @map to the thread's map groups, first trimming away any existing
 * maps it overlaps.
 *
 * NOTE(review): the -ENOMEM result of map_groups__fixup_overlappings()
 * is silently dropped because this function returns void — consider
 * propagating it to callers.
 */
void thread__insert_map(struct thread *self, struct map *map)
{
	map_groups__fixup_overlappings(&self->mg, map);
	map_groups__insert(&self->mg, map);
}
|
|
|
|
|
2009-12-12 00:50:36 +08:00
|
|
|
/*
|
|
|
|
* XXX This should not really _copy_ te maps, but refcount them.
|
|
|
|
*/
|
|
|
|
static int map_groups__clone(struct map_groups *self,
|
|
|
|
struct map_groups *parent, enum map_type type)
|
2009-08-14 18:21:53 +08:00
|
|
|
{
|
2009-09-29 01:48:46 +08:00
|
|
|
struct rb_node *nd;
|
2009-11-28 02:29:20 +08:00
|
|
|
for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
|
|
|
|
struct map *map = rb_entry(nd, struct map, rb_node);
|
|
|
|
struct map *new = map__clone(map);
|
|
|
|
if (new == NULL)
|
|
|
|
return -ENOMEM;
|
2009-12-12 00:50:36 +08:00
|
|
|
map_groups__insert(self, new);
|
2009-11-28 02:29:20 +08:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int thread__fork(struct thread *self, struct thread *parent)
|
|
|
|
{
|
|
|
|
int i;
|
2009-08-14 18:21:53 +08:00
|
|
|
|
2010-02-20 09:02:07 +08:00
|
|
|
if (parent->comm_set) {
|
|
|
|
if (self->comm)
|
|
|
|
free(self->comm);
|
|
|
|
self->comm = strdup(parent->comm);
|
|
|
|
if (!self->comm)
|
|
|
|
return -ENOMEM;
|
|
|
|
self->comm_set = true;
|
|
|
|
}
|
2009-08-14 18:21:53 +08:00
|
|
|
|
2009-11-28 02:29:20 +08:00
|
|
|
for (i = 0; i < MAP__NR_TYPES; ++i)
|
2009-12-12 00:50:36 +08:00
|
|
|
if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
|
2009-08-14 18:21:53 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-12-14 05:50:28 +08:00
|
|
|
size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
|
2009-08-14 18:21:53 +08:00
|
|
|
{
|
|
|
|
size_t ret = 0;
|
|
|
|
struct rb_node *nd;
|
|
|
|
|
2009-12-14 05:50:28 +08:00
|
|
|
for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
|
2009-08-14 18:21:53 +08:00
|
|
|
struct thread *pos = rb_entry(nd, struct thread, rb_node);
|
|
|
|
|
|
|
|
ret += thread__fprintf(pos, fp);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|