mm: trace filemap add and del

Use the events API to trace filemap loading and unloading of file pages
into and out of the page cache.

This patch aims at tracing the eviction/reload cycle of executable and
shared library pages in a memory-constrained environment.

The typical usage is to spot a specific device and inode (for example
/lib/libc.so) to see the eviction cycles, and to find out whether frequently
used code is spread across many pages (bad) or coalesced (good).
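
A minimal usage sketch of that workflow (the debugfs mount point and the
numeric device/inode values below are placeholders; the event and field
names come from the tracepoints added by this patch):

  # enable both new events of the "filemap" subsystem
  echo 1 > /sys/kernel/debug/tracing/events/filemap/enable

  # optionally restrict the output to a single file, e.g. /lib/libc.so
  # (0x801 stands for dev 8:1, 0x3a2f is a made-up inode number; take
  # the real values from "stat /lib/libc.so")
  echo 's_dev == 0x801 && i_ino == 0x3a2f' > \
        /sys/kernel/debug/tracing/events/filemap/filter

  # watch the eviction/reload cycle; each line follows the TP_printk
  # format of the event class below
  cat /sys/kernel/debug/tracing/trace_pipe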

Signed-off-by: Robert Jarzmik <robert.jarzmik@free.fr>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Authored by Robert Jarzmik on 2013-04-29 15:06:10 -07:00, committed by Linus Torvalds
commit fe0bfaaff8, parent e39862958d
2 changed files with 63 additions and 0 deletions

include/trace/events/filemap.h (new file)

@@ -0,0 +1,58 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM filemap

#if !defined(_TRACE_FILEMAP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FILEMAP_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/device.h>
#include <linux/kdev_t.h>

DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,

	TP_PROTO(struct page *page),

	TP_ARGS(page),

	TP_STRUCT__entry(
		__field(struct page *, page)
		__field(unsigned long, i_ino)
		__field(unsigned long, index)
		__field(dev_t, s_dev)
	),

	TP_fast_assign(
		__entry->page = page;
		__entry->i_ino = page->mapping->host->i_ino;
		__entry->index = page->index;
		if (page->mapping->host->i_sb)
			__entry->s_dev = page->mapping->host->i_sb->s_dev;
		else
			__entry->s_dev = page->mapping->host->i_rdev;
	),

	TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu",
		MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
		__entry->i_ino,
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->index << PAGE_SHIFT)
);

DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_delete_from_page_cache,
	TP_PROTO(struct page *page),
	TP_ARGS(page)
	);

DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
	TP_PROTO(struct page *page),
	TP_ARGS(page)
	);

#endif /* _TRACE_FILEMAP_H */

/* This part must be outside protection */
#include <trace/define_trace.h>

mm/filemap.c

@@ -35,6 +35,9 @@
#include <linux/cleancache.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */

@@ -113,6 +116,7 @@ void __delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave

@@ -464,6 +468,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		mapping->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		spin_unlock_irq(&mapping->tree_lock);
		trace_mm_filemap_add_to_page_cache(page);
	} else {
		page->mapping = NULL;
		/* Leave page->index set: truncation relies upon it */