afs: Trace page dirty/clean

Add a trace event that logs the dirtying and cleaning of pages attached to
AFS inodes.

Signed-off-by: David Howells <dhowells@redhat.com>
commit 13524ab3c6 (parent 1cf7a1518a)
Author: David Howells
Date: 2017-11-02 15:27:53 +00:00
3 changed files with 70 additions and 13 deletions
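
Once applied, the new afs_page_dirty event can be consumed through the usual tracefs interface. Below is a minimal userspace sketch (not part of the patch) that enables the event and dumps the trace ring buffer; it assumes tracefs is mounted at /sys/kernel/debug/tracing, which may vary by system.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/debug/tracing"

int main(void)
{
	int fd;
	FILE *trace;
	char line[512];

	/* Turn on the afs/afs_page_dirty event added by this patch. */
	fd = open(TRACEFS "/events/afs/afs_page_dirty/enable", O_WRONLY);
	if (fd < 0) {
		perror("enable");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);

	/* Dump whatever the kernel has logged so far. */
	trace = fopen(TRACEFS "/trace", "r");
	if (!trace) {
		perror("trace");
		return 1;
	}
	while (fgets(line, sizeof(line), trace))
		fputs(line, stdout);
	fclose(trace);
	return 0;
}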

fs/afs/file.c

@@ -583,6 +583,9 @@ static int afs_readpages(struct file *file, struct address_space *mapping,
static void afs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
unsigned long priv;
_enter("{%lu},%u,%u", page->index, offset, length);
BUG_ON(!PageLocked(page));
@@ -598,6 +601,9 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
#endif
if (PagePrivate(page)) {
priv = page_private(page);
trace_afs_page_dirty(vnode, tracepoint_string("inval"),
page->index, priv);
set_page_private(page, 0);
ClearPagePrivate(page);
}
@@ -613,6 +619,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
unsigned long priv;
_enter("{{%x:%u}[%lu],%lx},%x",
vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
@@ -628,6 +635,9 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
#endif
if (PagePrivate(page)) {
priv = page_private(page);
trace_afs_page_dirty(vnode, tracepoint_string("rel"),
page->index, priv);
set_page_private(page, 0);
ClearPagePrivate(page);
}
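
Each of the hunks above follows the same pattern: capture page->private before the page's dirty bookkeeping is torn down, emit the trace event with a short label wrapped in tracepoint_string(), and only then clear the private word. A sketch of that pattern as a helper is shown below; the helper name is hypothetical and the patch open-codes this at each call site.

/* Hypothetical helper illustrating the pattern used by this patch. */
static void afs_trace_and_clear_private(struct afs_vnode *vnode,
					struct page *page, const char *why)
{
	unsigned long priv = page_private(page);

	/* Record the dirty range (packed into priv) before discarding it. */
	trace_afs_page_dirty(vnode, why, page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);
}

A call site would then pass tracepoint_string("inval") or tracepoint_string("rel") as the label, exactly as the hunks above do.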

fs/afs/write.c

@@ -17,19 +17,6 @@
#include <linux/pagevec.h>
#include "internal.h"
/*
* We use page->private to hold the amount of the page that we've written to,
* splitting the field into two parts. However, we need to represent a range
* 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
*/
#if PAGE_SIZE > 32768
#define AFS_PRIV_MAX 0xffffffff
#define AFS_PRIV_SHIFT 32
#else
#define AFS_PRIV_MAX 0xffff
#define AFS_PRIV_SHIFT 16
#endif
/*
* mark a page as having been made dirty and thus needing writeback
*/
@@ -145,6 +132,8 @@ try_again:
priv = (unsigned long)t << AFS_PRIV_SHIFT;
priv |= f;
trace_afs_page_dirty(vnode, tracepoint_string("begin"),
page->index, priv);
SetPagePrivate(page);
set_page_private(page, priv);
_leave(" = 0");
@@ -386,6 +375,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
struct page *primary_page,
pgoff_t final_page)
{
struct afs_vnode *vnode = AFS_FS_I(mapping->host);
struct page *pages[8], *page;
unsigned long count, priv;
unsigned n, offset, to, f, t;
@@ -407,8 +397,13 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
priv = page_private(primary_page);
offset = priv & AFS_PRIV_MAX;
to = priv >> AFS_PRIV_SHIFT;
trace_afs_page_dirty(vnode, tracepoint_string("store"),
primary_page->index, priv);
WARN_ON(offset == to);
if (offset == to)
trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
primary_page->index, priv);
if (start >= final_page || to < PAGE_SIZE)
goto no_more;
@@ -452,6 +447,9 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
}
to = t;
trace_afs_page_dirty(vnode, tracepoint_string("store+"),
page->index, priv);
if (!clear_page_dirty_for_io(page))
BUG();
if (test_set_page_writeback(page))
@@ -657,6 +655,7 @@ int afs_writepages(struct address_space *mapping,
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
struct pagevec pv;
unsigned long priv;
unsigned count, loop;
pgoff_t first = call->first, last = call->last;
@@ -676,6 +675,9 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
ASSERTCMP(pv.nr, ==, count);
for (loop = 0; loop < count; loop++) {
priv = page_private(pv.pages[loop]);
trace_afs_page_dirty(vnode, tracepoint_string("clear"),
pv.pages[loop]->index, priv);
set_page_private(pv.pages[loop], 0);
end_page_writeback(pv.pages[loop]);
}
@@ -783,6 +785,8 @@ int afs_page_mkwrite(struct vm_fault *vmf)
priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
priv |= 0; /* From */
trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
vmf->page->index, priv);
SetPagePrivate(vmf->page);
set_page_private(vmf->page, priv);
@@ -840,9 +844,13 @@ int afs_launder_page(struct page *page)
t = priv >> AFS_PRIV_SHIFT;
}
trace_afs_page_dirty(vnode, tracepoint_string("launder"),
page->index, priv);
ret = afs_store_data(mapping, page->index, page->index, t, f);
}
trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
page->index, priv);
set_page_private(page, 0);
ClearPagePrivate(page);
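
All of the sites above manipulate the same encoding: the offset the page has been dirtied from is kept in the low half of page->private and the offset it has been dirtied to in the high half. A minimal sketch of that encoding as helpers (the names are hypothetical; the patch keeps the arithmetic inline, using the AFS_PRIV_SHIFT/AFS_PRIV_MAX constants it moves into the trace header below):

static inline unsigned long afs_page_priv_pack(unsigned int from, unsigned int to)
{
	/* "to" in the high half, "from" in the low half of page->private. */
	return ((unsigned long)to << AFS_PRIV_SHIFT) | from;
}

static inline unsigned int afs_page_priv_from(unsigned long priv)
{
	return priv & AFS_PRIV_MAX;
}

static inline unsigned int afs_page_priv_to(unsigned long priv)
{
	return priv >> AFS_PRIV_SHIFT;
}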

include/trace/events/afs.h

@@ -402,6 +402,45 @@ TRACE_EVENT(afs_dir_check_failed,
__entry->vnode, __entry->off, __entry->i_size)
);
/*
* We use page->private to hold the amount of the page that we've written to,
* splitting the field into two parts. However, we need to represent a range
* 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
*/
#if PAGE_SIZE > 32768
#define AFS_PRIV_MAX 0xffffffff
#define AFS_PRIV_SHIFT 32
#else
#define AFS_PRIV_MAX 0xffff
#define AFS_PRIV_SHIFT 16
#endif
TRACE_EVENT(afs_page_dirty,
TP_PROTO(struct afs_vnode *vnode, const char *where,
pgoff_t page, unsigned long priv),
TP_ARGS(vnode, where, page, priv),
TP_STRUCT__entry(
__field(struct afs_vnode *, vnode )
__field(const char *, where )
__field(pgoff_t, page )
__field(unsigned long, priv )
),
TP_fast_assign(
__entry->vnode = vnode;
__entry->where = where;
__entry->page = page;
__entry->priv = priv;
),
TP_printk("vn=%p %lx %s %lu-%lu",
__entry->vnode, __entry->page, __entry->where,
__entry->priv & AFS_PRIV_MAX,
__entry->priv >> AFS_PRIV_SHIFT)
);
#endif /* _TRACE_AFS_H */
/* This part must be outside protection */
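
As a worked example of the TP_printk decoding above (assuming 4K pages, so AFS_PRIV_SHIFT is 16 and AFS_PRIV_MAX is 0xffff): a page dirtied from byte 256 to byte 1024 stores priv = (1024 << 16) | 256 = 0x04000100, which the format string prints as the range 256-1024. The standalone snippet below reproduces that arithmetic:

#include <stdio.h>

#define AFS_PRIV_MAX	0xffff	/* 4K-page values from the header above */
#define AFS_PRIV_SHIFT	16

int main(void)
{
	unsigned long priv = (1024UL << AFS_PRIV_SHIFT) | 256;

	/* Mirrors the TP_printk: low half = from, high half = to. */
	printf("priv=%#lx dirty range %lu-%lu\n",
	       priv, priv & AFS_PRIV_MAX, priv >> AFS_PRIV_SHIFT);
	return 0;
}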