tracing: Remove code which merges duplicates
We now have the logic to detect and remove duplicates in the tracing_map hash table. The code which merges duplicates in the histogram is therefore redundant, so modify it to only detect duplicates. The detection code is still kept so that any rare race condition which might cause duplicates does not go unnoticed.

Link: http://lkml.kernel.org/r/55215cf59e2674391bdaf772fdafc4c393352b03.1516069914.git.tom.zanussi@linux.intel.com

Signed-off-by: Vedang Patel <vedang.patel@intel.com>
Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent cbf4100efb
commit c193707dde
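For context, the surviving detection pass simply sorts the entries by key and counts adjacent equal keys instead of merging them. Below is a minimal userspace sketch of that pattern, not the kernel code itself: qsort() and fprintf() stand in for the kernel's sort() and WARN_ONCE(), and the simplified entry layout with a fixed 8-byte key is a hypothetical stand-in for tracing_map's variable key_size.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KEY_SIZE 8

struct entry {
	char key[KEY_SIZE];	/* fixed-size binary key, compared with memcmp() */
	long sum;
};

static int cmp_keys(const void *a, const void *b)
{
	return memcmp(((const struct entry *)a)->key,
		      ((const struct entry *)b)->key, KEY_SIZE);
}

/* Sort by key, then count adjacent duplicates; warn rather than merge. */
static unsigned int detect_dups(struct entry *entries, int n_entries)
{
	unsigned int total_dups = 0;
	int i;

	if (n_entries < 2)
		return 0;

	qsort(entries, n_entries, sizeof(*entries), cmp_keys);

	for (i = 1; i < n_entries; i++)
		if (!memcmp(entries[i].key, entries[i - 1].key, KEY_SIZE))
			total_dups++;

	if (total_dups)	/* analogue of WARN_ONCE(total_dups > 0, ...) */
		fprintf(stderr, "Duplicates detected: %u\n", total_dups);

	return total_dups;
}

int main(void)
{
	struct entry entries[] = { { "pid1", 1 }, { "pid2", 2 }, { "pid1", 3 } };

	detect_dups(entries, 3);	/* prints "Duplicates detected: 1" */
	return 0;
}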
kernel/trace/trace_events_hist.c

@@ -340,16 +340,6 @@ static int hist_trigger_elt_comm_alloc(struct tracing_map_elt *elt)
 	return 0;
 }
 
-static void hist_trigger_elt_comm_copy(struct tracing_map_elt *to,
-				       struct tracing_map_elt *from)
-{
-	char *comm_from = from->private_data;
-	char *comm_to = to->private_data;
-
-	if (comm_from)
-		memcpy(comm_to, comm_from, TASK_COMM_LEN + 1);
-}
-
 static void hist_trigger_elt_comm_init(struct tracing_map_elt *elt)
 {
 	char *comm = elt->private_data;

@@ -360,7 +350,6 @@ static void hist_trigger_elt_comm_init(struct tracing_map_elt *elt)
 
 static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
 	.elt_alloc = hist_trigger_elt_comm_alloc,
-	.elt_copy = hist_trigger_elt_comm_copy,
 	.elt_free = hist_trigger_elt_comm_free,
 	.elt_init = hist_trigger_elt_comm_init,
 };
kernel/trace/tracing_map.c

@@ -847,67 +847,15 @@ create_sort_entry(void *key, struct tracing_map_elt *elt)
 	return sort_entry;
 }
 
-static struct tracing_map_elt *copy_elt(struct tracing_map_elt *elt)
-{
-	struct tracing_map_elt *dup_elt;
-	unsigned int i;
-
-	dup_elt = tracing_map_elt_alloc(elt->map);
-	if (IS_ERR(dup_elt))
-		return NULL;
-
-	if (elt->map->ops && elt->map->ops->elt_copy)
-		elt->map->ops->elt_copy(dup_elt, elt);
-
-	dup_elt->private_data = elt->private_data;
-	memcpy(dup_elt->key, elt->key, elt->map->key_size);
-
-	for (i = 0; i < elt->map->n_fields; i++) {
-		atomic64_set(&dup_elt->fields[i].sum,
-			     atomic64_read(&elt->fields[i].sum));
-		dup_elt->fields[i].cmp_fn = elt->fields[i].cmp_fn;
-	}
-
-	return dup_elt;
-}
-
-static int merge_dup(struct tracing_map_sort_entry **sort_entries,
-		     unsigned int target, unsigned int dup)
-{
-	struct tracing_map_elt *target_elt, *elt;
-	bool first_dup = (target - dup) == 1;
-	int i;
-
-	if (first_dup) {
-		elt = sort_entries[target]->elt;
-		target_elt = copy_elt(elt);
-		if (!target_elt)
-			return -ENOMEM;
-		sort_entries[target]->elt = target_elt;
-		sort_entries[target]->elt_copied = true;
-	} else
-		target_elt = sort_entries[target]->elt;
-
-	elt = sort_entries[dup]->elt;
-
-	for (i = 0; i < elt->map->n_fields; i++)
-		atomic64_add(atomic64_read(&elt->fields[i].sum),
-			     &target_elt->fields[i].sum);
-
-	sort_entries[dup]->dup = true;
-
-	return 0;
-}
-
-static int merge_dups(struct tracing_map_sort_entry **sort_entries,
+static void detect_dups(struct tracing_map_sort_entry **sort_entries,
 		      int n_entries, unsigned int key_size)
 {
 	unsigned int dups = 0, total_dups = 0;
-	int err, i, j;
+	int i;
 	void *key;
 
 	if (n_entries < 2)
-		return total_dups;
+		return;
 
 	sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *),
 	     (int (*)(const void *, const void *))cmp_entries_dup, NULL);
@@ -916,30 +864,14 @@ static int merge_dups(struct tracing_map_sort_entry **sort_entries,
 	for (i = 1; i < n_entries; i++) {
 		if (!memcmp(sort_entries[i]->key, key, key_size)) {
 			dups++; total_dups++;
-			err = merge_dup(sort_entries, i - dups, i);
-			if (err)
-				return err;
 			continue;
 		}
 		key = sort_entries[i]->key;
 		dups = 0;
 	}
 
-	if (!total_dups)
-		return total_dups;
-
-	for (i = 0, j = 0; i < n_entries; i++) {
-		if (!sort_entries[i]->dup) {
-			sort_entries[j] = sort_entries[i];
-			if (j++ != i)
-				sort_entries[i] = NULL;
-		} else {
-			destroy_sort_entry(sort_entries[i]);
-			sort_entries[i] = NULL;
-		}
-	}
-
-	return total_dups;
+	WARN_ONCE(total_dups > 0,
+		  "Duplicates detected: %d\n", total_dups);
 }
 
 static bool is_key(struct tracing_map *map, unsigned int field_idx)
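A note on the new warning above: WARN_ONCE() emits its message (and a backtrace) only the first time the condition evaluates true, so even a recurring race cannot flood the kernel log. A rough userspace analogue of that once-only behavior, using a hypothetical WARN_ONCE_LIKE macro built around a static flag:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical analogue of WARN_ONCE(): report only the first occurrence
 * per call site; the static flag persists across invocations.
 */
#define WARN_ONCE_LIKE(cond, ...)			\
	do {						\
		static bool warned;			\
		if ((cond) && !warned) {		\
			warned = true;			\
			fprintf(stderr, __VA_ARGS__);	\
		}					\
	} while (0)

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		WARN_ONCE_LIKE(1, "Duplicates detected: %d\n", 1);

	return 0;	/* the message prints exactly once */
}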
@@ -1065,10 +997,7 @@ int tracing_map_sort_entries(struct tracing_map *map,
 		return 1;
 	}
 
-	ret = merge_dups(entries, n_entries, map->key_size);
-	if (ret < 0)
-		goto free;
-	n_entries -= ret;
+	detect_dups(entries, n_entries, map->key_size);
 
 	if (is_key(map, sort_keys[0].field_idx))
 		cmp_entries_fn = cmp_entries_key;
kernel/trace/tracing_map.h

@@ -215,11 +215,6 @@ struct tracing_map {
  * Element allocation occurs before tracing begins, when the
  * tracing_map_init() call is made by client code.
  *
- * @elt_copy: At certain points in the lifetime of an element, it may
- *	need to be copied. The copy should include a copy of the
- *	client-allocated data, which can be copied into the 'to'
- *	element from the 'from' element.
- *
  * @elt_free: When a tracing_map_elt is freed, this function is called
  *	and allows client-allocated per-element data to be freed.
  *
@@ -233,8 +228,6 @@ struct tracing_map {
  */
 struct tracing_map_ops {
 	int (*elt_alloc)(struct tracing_map_elt *elt);
-	void (*elt_copy)(struct tracing_map_elt *to,
-			 struct tracing_map_elt *from);
 	void (*elt_free)(struct tracing_map_elt *elt);
 	void (*elt_clear)(struct tracing_map_elt *elt);
 	void (*elt_init)(struct tracing_map_elt *elt);