iomap: lift common tracing code from xfs to iomap
Lift the xfs code for tracing address space operations to the iomap layer.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 009d8d849d
commit 9e91c5728c
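The practical effect of the lift is that the trace calls now live inside the exported iomap helpers themselves, so a filesystem built on iomap no longer needs local wrapper functions just to emit address-space-operation trace events. Below is a minimal sketch of the resulting wiring for a hypothetical filesystem "myfs" (not part of this patch; the iomap_* helpers are the ones used by xfs in the diff):

    /*
     * Hypothetical example: with the tracepoints moved into the shared
     * helpers, pointing the aops directly at iomap_releasepage() and
     * iomap_invalidatepage() is enough to get the iomap_releasepage and
     * iomap_invalidatepage trace events.
     */
    #include <linux/fs.h>
    #include <linux/iomap.h>

    static const struct address_space_operations myfs_aops = {
    	.set_page_dirty		= iomap_set_page_dirty,
    	.releasepage		= iomap_releasepage,
    	.invalidatepage		= iomap_invalidatepage,
    	.migratepage		= iomap_migrate_page,
    	.is_partially_uptodate	= iomap_is_partially_uptodate,
    };

As with xfs in this patch, readpage/readpages still need thin per-filesystem wrappers, because iomap_readpage()/iomap_readpages() take the filesystem's struct iomap_ops as an argument.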
fs/iomap/Makefile
@@ -3,13 +3,15 @@
 # Copyright (c) 2019 Oracle.
 # All Rights Reserved.
 #

+ccflags-y += -I $(srctree)/$(src)	# needed for trace events
+
 obj-$(CONFIG_FS_IOMAP)	+= iomap.o

-iomap-y		+= \
-		   apply.o \
-		   buffered-io.o \
-		   direct-io.o \
-		   fiemap.o \
-		   seek.o
+iomap-y		+= trace.o \
+		   apply.o \
+		   buffered-io.o \
+		   direct-io.o \
+		   fiemap.o \
+		   seek.o
 iomap-$(CONFIG_SWAP)	+= swapfile.o
fs/iomap/buffered-io.c
@@ -16,6 +16,7 @@
 #include <linux/bio.h>
 #include <linux/sched/signal.h>
 #include <linux/migrate.h>
+#include "trace.h"

 #include "../internal.h"

@@ -301,6 +302,8 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
 	unsigned poff;
 	loff_t ret;

+	trace_iomap_readpage(page->mapping->host, 1);
+
 	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
 		ret = iomap_apply(inode, page_offset(page) + poff,
 				PAGE_SIZE - poff, 0, ops, &ctx,
@@ -397,6 +400,8 @@ iomap_readpages(struct address_space *mapping, struct list_head *pages,
 	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
 	loff_t length = last - pos + PAGE_SIZE, ret = 0;

+	trace_iomap_readpages(mapping->host, nr_pages);
+
 	while (length > 0) {
 		ret = iomap_apply(mapping->host, pos, length, 0, ops,
 				&ctx, iomap_readpages_actor);
@@ -463,6 +468,8 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
 int
 iomap_releasepage(struct page *page, gfp_t gfp_mask)
 {
+	trace_iomap_releasepage(page->mapping->host, page, 0, 0);
+
 	/*
 	 * mm accommodates an old ext3 case where clean pages might not have had
 	 * the dirty bit cleared. Thus, it can send actual dirty pages to
@@ -478,6 +485,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
 void
 iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
 {
+	trace_iomap_invalidatepage(page->mapping->host, page, offset, len);
+
 	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
fs/iomap/trace.c (new file)
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Christoph Hellwig
+ */
+#include <linux/iomap.h>
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "trace.h"
fs/iomap/trace.h (new file)
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2009-2019 Christoph Hellwig
+ *
+ * NOTE: none of these tracepoints shall be considered a stable kernel ABI
+ * as they can change at any time.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iomap
+
+#if !defined(_IOMAP_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _IOMAP_TRACE_H
+
+#include <linux/tracepoint.h>
+
+struct inode;
+
+DECLARE_EVENT_CLASS(iomap_readpage_class,
+	TP_PROTO(struct inode *inode, int nr_pages),
+	TP_ARGS(inode, nr_pages),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(u64, ino)
+		__field(int, nr_pages)
+	),
+	TP_fast_assign(
+		__entry->dev = inode->i_sb->s_dev;
+		__entry->ino = inode->i_ino;
+		__entry->nr_pages = nr_pages;
+	),
+	TP_printk("dev %d:%d ino 0x%llx nr_pages %d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __entry->nr_pages)
+)
+
+#define DEFINE_READPAGE_EVENT(name)		\
+DEFINE_EVENT(iomap_readpage_class, name,	\
+	TP_PROTO(struct inode *inode, int nr_pages), \
+	TP_ARGS(inode, nr_pages))
+DEFINE_READPAGE_EVENT(iomap_readpage);
+DEFINE_READPAGE_EVENT(iomap_readpages);
+
+DECLARE_EVENT_CLASS(iomap_page_class,
+	TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
+		 unsigned int len),
+	TP_ARGS(inode, page, off, len),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(u64, ino)
+		__field(pgoff_t, pgoff)
+		__field(loff_t, size)
+		__field(unsigned long, offset)
+		__field(unsigned int, length)
+	),
+	TP_fast_assign(
+		__entry->dev = inode->i_sb->s_dev;
+		__entry->ino = inode->i_ino;
+		__entry->pgoff = page_offset(page);
+		__entry->size = i_size_read(inode);
+		__entry->offset = off;
+		__entry->length = len;
+	),
+	TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
+		  "length %x",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __entry->pgoff,
+		  __entry->size,
+		  __entry->offset,
+		  __entry->length)
+)
+
+#define DEFINE_PAGE_EVENT(name)		\
+DEFINE_EVENT(iomap_page_class, name,	\
+	TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
+		 unsigned int len), \
+	TP_ARGS(inode, page, off, len))
+DEFINE_PAGE_EVENT(iomap_releasepage);
+DEFINE_PAGE_EVENT(iomap_invalidatepage);
+
+#endif /* _IOMAP_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
fs/xfs/xfs_aops.c
@@ -823,16 +823,6 @@ xfs_add_to_ioend(
 	wbc_account_cgroup_owner(wbc, page, len);
 }

-STATIC void
-xfs_vm_invalidatepage(
-	struct page		*page,
-	unsigned int		offset,
-	unsigned int		length)
-{
-	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
-	iomap_invalidatepage(page, offset, length);
-}
-
 /*
  * If the page has delalloc blocks on it, we need to punch them out before we
  * invalidate the page. If we don't, we leave a stale delalloc mapping on the
@@ -867,7 +857,7 @@ xfs_aops_discard_page(
 	if (error && !XFS_FORCED_SHUTDOWN(mp))
 		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
 out_invalidate:
-	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
+	iomap_invalidatepage(page, 0, PAGE_SIZE);
 }

 /*
@@ -1147,15 +1137,6 @@ xfs_dax_writepages(
 			xfs_find_bdev_for_inode(mapping->host), wbc);
 }

-STATIC int
-xfs_vm_releasepage(
-	struct page		*page,
-	gfp_t			gfp_mask)
-{
-	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
-	return iomap_releasepage(page, gfp_mask);
-}
-
 STATIC sector_t
 xfs_vm_bmap(
 	struct address_space	*mapping,
@@ -1184,7 +1165,6 @@ xfs_vm_readpage(
 	struct file		*unused,
 	struct page		*page)
 {
-	trace_xfs_vm_readpage(page->mapping->host, 1);
 	return iomap_readpage(page, &xfs_iomap_ops);
 }

@@ -1195,7 +1175,6 @@ xfs_vm_readpages(
 	struct list_head	*pages,
 	unsigned		nr_pages)
 {
-	trace_xfs_vm_readpages(mapping->host, nr_pages);
 	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
 }

@@ -1215,8 +1194,8 @@ const struct address_space_operations xfs_address_space_operations = {
 	.writepage		= xfs_vm_writepage,
 	.writepages		= xfs_vm_writepages,
 	.set_page_dirty		= iomap_set_page_dirty,
-	.releasepage		= xfs_vm_releasepage,
-	.invalidatepage		= xfs_vm_invalidatepage,
+	.releasepage		= iomap_releasepage,
+	.invalidatepage		= iomap_invalidatepage,
 	.bmap			= xfs_vm_bmap,
 	.direct_IO		= noop_direct_IO,
 	.migratepage		= iomap_migrate_page,
fs/xfs/xfs_trace.h
@@ -1197,32 +1197,6 @@ DEFINE_PAGE_EVENT(xfs_writepage);
 DEFINE_PAGE_EVENT(xfs_releasepage);
 DEFINE_PAGE_EVENT(xfs_invalidatepage);

-DECLARE_EVENT_CLASS(xfs_readpage_class,
-	TP_PROTO(struct inode *inode, int nr_pages),
-	TP_ARGS(inode, nr_pages),
-	TP_STRUCT__entry(
-		__field(dev_t, dev)
-		__field(xfs_ino_t, ino)
-		__field(int, nr_pages)
-	),
-	TP_fast_assign(
-		__entry->dev = inode->i_sb->s_dev;
-		__entry->ino = inode->i_ino;
-		__entry->nr_pages = nr_pages;
-	),
-	TP_printk("dev %d:%d ino 0x%llx nr_pages %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->ino,
-		  __entry->nr_pages)
-)
-
-#define DEFINE_READPAGE_EVENT(name)		\
-DEFINE_EVENT(xfs_readpage_class, name,	\
-	TP_PROTO(struct inode *inode, int nr_pages), \
-	TP_ARGS(inode, nr_pages))
-DEFINE_READPAGE_EVENT(xfs_vm_readpage);
-DEFINE_READPAGE_EVENT(xfs_vm_readpages);
-
 DECLARE_EVENT_CLASS(xfs_imap_class,
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
 		 int whichfork, struct xfs_bmbt_irec *irec),