perf evlist: Fix perf_evlist__mmap_read event overflow
perf_evlist__mmap_read() used 'union perf_event' as a placeholder for events crossing the mmap boundary. That is fine for samples shorter than roughly PATH_MAX, but a sample can grow up to the maximum sample size, which is bounded only by the 16-bit size field in the event header.

I hit this overflow when using 'perf top -G dwarf', which produces samples of around 8192 bytes, and '-G dwarf,size' lets the user configure any valid sample size.

Use an array of the maximum sample size as the event placeholder instead. Also add another sanity check for the dynamic size of the user stack dump.

TODO: 'struct perf_mmap' is quite big now; maybe we could use some lazy allocation for the event_copy buffer.

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1380721599-24285-1-git-send-email-jolsa@redhat.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit a65cb4b9f8
parent b81a48ea87
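For context, a minimal stand-alone sketch of the failure mode described above; the 'small_placeholder' union and the constants below are simplified stand-ins for illustration, not the real 'union perf_event':

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for 'union perf_event': its members are on the order
 * of PATH_MAX bytes, far smaller than the largest possible sample. */
#define FAKE_PATH_MAX 4096
union small_placeholder {
	struct { uint32_t type; uint16_t misc, size; } header;
	char filename[FAKE_PATH_MAX];
};

/* The record size field in the event header is 16 bits wide. */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)

int main(void)
{
	/* 'perf top -G dwarf' produces samples around 8192 bytes, and
	 * '-G dwarf,size' can request even larger user stack dumps. */
	size_t dwarf_sample = 8192;

	printf("placeholder size   : %zu bytes\n", sizeof(union small_placeholder));
	printf("max possible sample: %d bytes\n", PERF_SAMPLE_MAX_SIZE);
	printf("dwarf sample       : %zu bytes (%s)\n", dwarf_sample,
	       dwarf_sample > sizeof(union small_placeholder) ?
	       "overflows the old placeholder" : "fits");
	return 0;
}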
@@ -75,6 +75,9 @@ struct throttle_event {
 	 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD |	\
 	 PERF_SAMPLE_IDENTIFIER)
 
+/* perf sample has 16 bits size limit */
+#define PERF_SAMPLE_MAX_SIZE (1 << 16)
+
 struct sample_event {
 	struct perf_event_header header;
 	u64 array[];
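The 16-bit limit encoded in PERF_SAMPLE_MAX_SIZE comes from the record header in the perf UAPI: the 'size' field covering the whole record is a __u16, so no record, samples included, can exceed 64 KB. Copied here for reference only (see include/uapi/linux/perf_event.h):

#include <linux/types.h>

/* Every perf record starts with this header; 'size' is the length of the
 * whole record, so (1 << 16) - 1 is the hard upper bound on a sample. */
struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};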
@@ -540,7 +540,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 	if ((old & md->mask) + size != ((old + size) & md->mask)) {
 		unsigned int offset = old;
 		unsigned int len = min(sizeof(*event), size), cpy;
-		void *dst = &md->event_copy;
+		void *dst = md->event_copy;
 
 		do {
 			cpy = min(md->mask + 1 - (offset & md->mask), len);
@@ -550,7 +550,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 			len -= cpy;
 		} while (len);
 
-		event = &md->event_copy;
+		event = (union perf_event *) md->event_copy;
 	}
 
 	old += size;
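The two hunks above only change the type of the destination buffer; the copy loop itself is unchanged. A stand-alone sketch of that wrap-around copy follows, with made-up sizes (RING_SIZE, read_wrapped() and the demo values are assumptions for illustration, not perf code):

#include <stdio.h>
#include <string.h>

#define RING_SIZE 16			/* power of two, like the perf mmap ring */
#define MASK	  (RING_SIZE - 1)

static char ring[RING_SIZE];
static char event_copy[RING_SIZE];	/* linear buffer for wrapped records */

/* Copy 'size' bytes starting at 'offset', even if the record straddles the
 * end of the ring: copy up to the end first, then continue from the start. */
static void read_wrapped(unsigned int offset, unsigned int size)
{
	unsigned int len = size, cpy;
	char *dst = event_copy;

	do {
		cpy = RING_SIZE - (offset & MASK);
		if (cpy > len)
			cpy = len;
		memcpy(dst, &ring[offset & MASK], cpy);
		offset += cpy;
		dst += cpy;
		len -= cpy;
	} while (len);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < RING_SIZE; i++)
		ring[i] = 'a' + i;

	read_wrapped(12, 8);		/* spans indices 12..15, then wraps to 0..3 */
	printf("%.8s\n", event_copy);	/* prints "mnopabcd" */
	return 0;
}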
@@ -21,7 +21,7 @@ struct perf_mmap {
 	void *base;
 	int mask;
 	unsigned int prev;
-	union perf_event event_copy;
+	char event_copy[PERF_SAMPLE_MAX_SIZE];
 };
 
 struct perf_evlist {
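The embedded event_copy array adds 64 KB to every struct perf_mmap, which is what the TODO in the commit message is about. One possible shape of the lazy allocation it hints at, purely a sketch (struct perf_mmap_lazy and mmap_get_event_copy() are hypothetical names, not part of this patch):

#include <errno.h>
#include <stdlib.h>

#define PERF_SAMPLE_MAX_SIZE (1 << 16)

/* Hypothetical variant: keep a pointer instead of a 64 KB array and
 * allocate it only when the first wrapped record is actually seen. */
struct perf_mmap_lazy {
	void		*base;
	int		 mask;
	unsigned int	 prev;
	char		*event_copy;	/* allocated on demand */
};

static int mmap_get_event_copy(struct perf_mmap_lazy *md, void **dst)
{
	if (!md->event_copy) {
		md->event_copy = malloc(PERF_SAMPLE_MAX_SIZE);
		if (!md->event_copy)
			return -ENOMEM;
	}
	*dst = md->event_copy;
	return 0;
}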
@@ -1456,6 +1456,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 			array = (void *)array + sz;
 			OVERFLOW_CHECK_u64(array);
 			data->user_stack.size = *array++;
+			if (WARN_ONCE(data->user_stack.size > sz,
+				      "user stack dump failure\n"))
+				return -EFAULT;
 		}
 	}
 
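The added check guards the PERF_SAMPLE_STACK_USER part of the sample. Its layout, as documented in the perf_event UAPI, is a requested size, the raw dump, and then the dynamically used size; the latter becomes data->user_stack.size and can never legitimately exceed the former. A small illustrative helper mirroring the new check (user_stack_size_ok() is a hypothetical name, not in the patch):

#include <linux/types.h>

/* PERF_SAMPLE_STACK_USER record layout (perf_event UAPI):
 *
 *	{ u64  size;		requested dump size ('sz' in the parser)
 *	  char data[size];	raw user stack dump
 *	  u64  dyn_size; }	bytes actually used -> data->user_stack.size
 *
 * A well-formed record always has dyn_size <= size. */
static inline int user_stack_size_ok(__u64 sz, __u64 dyn_size)
{
	return dyn_size <= sz;	/* mirrors the new WARN_ONCE() condition */
}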