debugobjects: Track number of kmem_cache_alloc/kmem_cache_free done

New debugfs stat counters are added to track the number of
kmem_cache_alloc() and kmem_cache_free() calls, to give a sense of how
well the internal debug objects cache management is performing.

Signed-off-by: Waiman Long <longman@redhat.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: "Du Changbin" <changbin.du@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jan Stancek <jstancek@redhat.com>
Link: http://lkml.kernel.org/r/1483647425-4135-2-git-send-email-longman@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Author:    Waiman Long <longman@redhat.com>
Date:      2017-01-05 15:17:03 -05:00
Committer: Thomas Gleixner <tglx@linutronix.de>
parent 566cf877a1
commit c4b73aabd0
1 changed file with 10 additions and 0 deletions

lib/debugobjects.c

@@ -55,6 +55,12 @@ static int debug_objects_enabled __read_mostly
 static struct debug_obj_descr *descr_test  __read_mostly;
 
+/*
+ * Track numbers of kmem_cache_alloc and kmem_cache_free done.
+ */
+static int debug_objects_alloc;
+static int debug_objects_freed;
+
 static void free_obj_work(struct work_struct *work);
 static DECLARE_WORK(debug_obj_work, free_obj_work);
@@ -102,6 +108,7 @@ static void fill_pool(void)
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
+		debug_objects_alloc++;
 		obj_pool_free++;
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
@@ -173,6 +180,7 @@ static void free_obj_work(struct work_struct *work)
 		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
 		hlist_del(&obj->node);
 		obj_pool_free--;
+		debug_objects_freed++;
 		/*
 		 * We release pool_lock across kmem_cache_free() to
 		 * avoid contention on pool_lock.
@@ -758,6 +766,8 @@ static int debug_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
 	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+	seq_printf(m, "objects_alloc :%d\n", debug_objects_alloc);
+	seq_printf(m, "objects_freed :%d\n", debug_objects_freed);
 	return 0;
 }
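
As a usage sketch (not part of the patch): the new counters appear in the
debugobjects stats file alongside the existing pool statistics. The path
below assumes the conventional debugfs mount point /sys/kernel/debug and
the debug_objects/stats file created by lib/debugobjects.c; verify both
on your kernel. The file name dump_debugobjects_stats.c is hypothetical.

/*
 * dump_debugobjects_stats.c - print the debugobjects stat counters.
 *
 * Build: gcc -Wall -o dump_debugobjects_stats dump_debugobjects_stats.c
 * Run as root; the stats file path is an assumption (see above).
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/debug_objects/stats", "r");
	char line[128];

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}

	/* Each line is "<name> :<value>", e.g. "objects_alloc :42". */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return EXIT_SUCCESS;
}

Because debug_objects_alloc and debug_objects_freed only ever increase,
sampling the file twice and diffing each counter gives the kmem_cache
churn over the interval; a large alloc/free delta relative to pool_used
suggests objects are cycling through the slab cache rather than being
reused from the pool.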