dax: correct dax iomap code namespace

The recently added DAX functions that use the new struct iomap data
structure were named iomap_dax_rw(), iomap_dax_fault() and
iomap_dax_actor(). These are actually defined in fs/dax.c, though, so
they should be part of the "dax" namespace rather than the "iomap"
namespace. Rename them to dax_iomap_rw(), dax_iomap_fault() and
dax_iomap_actor() respectively.

Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Suggested-by: Dave Chinner <david@fromorbit.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dave Chinner <david@fromorbit.com>
commit 11c59c92f4
parent b9fde0462e
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1031,7 +1031,7 @@ EXPORT_SYMBOL_GPL(dax_truncate_page);
 
 #ifdef CONFIG_FS_IOMAP
 static loff_t
-iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		struct iomap *iomap)
 {
 	struct iov_iter *iter = data;
@@ -1088,7 +1088,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 }
 
 /**
- * iomap_dax_rw - Perform I/O to a DAX file
+ * dax_iomap_rw - Perform I/O to a DAX file
  * @iocb:	The control block for this I/O
  * @iter:	The addresses to do I/O from or to
  * @ops:	iomap ops passed from the file system
@@ -1098,7 +1098,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
  * and evicting any page cache pages in the region under I/O.
  */
 ssize_t
-iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		struct iomap_ops *ops)
 {
 	struct address_space *mapping = iocb->ki_filp->f_mapping;
@@ -1128,7 +1128,7 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
 
 	while (iov_iter_count(iter)) {
 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
-				iter, iomap_dax_actor);
+				iter, dax_iomap_actor);
 		if (ret <= 0)
 			break;
 		pos += ret;
@@ -1138,10 +1138,10 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
 	iocb->ki_pos += done;
 	return done ? done : ret;
 }
-EXPORT_SYMBOL_GPL(iomap_dax_rw);
+EXPORT_SYMBOL_GPL(dax_iomap_rw);
 
 /**
- * iomap_dax_fault - handle a page fault on a DAX file
+ * dax_iomap_fault - handle a page fault on a DAX file
  * @vma:	The virtual memory area where the fault occurred
  * @vmf:	The description of the fault
  * @ops:	iomap ops passed from the file system
@@ -1150,7 +1150,7 @@ EXPORT_SYMBOL_GPL(iomap_dax_rw);
  * or mkwrite handler for DAX files. Assumes the caller has done all the
  * necessary locking for the page fault to proceed successfully.
  */
-int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		struct iomap_ops *ops)
 {
 	struct address_space *mapping = vma->vm_file->f_mapping;
@@ -1252,5 +1252,5 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		return VM_FAULT_SIGBUS | major;
 	return VM_FAULT_NOPAGE | major;
 }
-EXPORT_SYMBOL_GPL(iomap_dax_fault);
+EXPORT_SYMBOL_GPL(dax_iomap_fault);
 #endif /* CONFIG_FS_IOMAP */
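For orientation, the renamed pieces fit together as follows. This is a
simplified sketch stitched from the hunks above, not a verbatim copy of
fs/dax.c: the page-cache eviction mentioned in the kernel-doc ("evicting
any page cache pages in the region under I/O") is omitted, and the
variable setup is abridged from the v4.9-era source.

/* Sketch: dax_iomap_rw() drives dax_iomap_actor() through iomap_apply(). */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE)
		flags |= IOMAP_WRITE;

	while (iov_iter_count(iter)) {
		/*
		 * iomap_apply() asks the filesystem (via ops->iomap_begin)
		 * to map the next extent, then hands that mapping plus the
		 * iov_iter to dax_iomap_actor(), which copies the data to
		 * or from the directly mapped persistent memory.
		 */
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags,
				ops, iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}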
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -38,7 +38,7 @@ static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 		return 0; /* skip atime */
 
 	inode_lock_shared(inode);
-	ret = iomap_dax_rw(iocb, to, &ext2_iomap_ops);
+	ret = dax_iomap_rw(iocb, to, &ext2_iomap_ops);
 	inode_unlock_shared(inode);
 
 	file_accessed(iocb->ki_filp);
@@ -62,7 +62,7 @@ static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	if (ret)
 		goto out_unlock;
 
-	ret = iomap_dax_rw(iocb, from, &ext2_iomap_ops);
+	ret = dax_iomap_rw(iocb, from, &ext2_iomap_ops);
 	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 		i_size_write(inode, iocb->ki_pos);
 		mark_inode_dirty(inode);
@@ -99,7 +99,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 	down_read(&ei->dax_sem);
 
-	ret = iomap_dax_fault(vma, vmf, &ext2_iomap_ops);
+	ret = dax_iomap_fault(vma, vmf, &ext2_iomap_ops);
 
 	up_read(&ei->dax_sem);
 	if (vmf->flags & FAULT_FLAG_WRITE)
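The caller-side changes are purely mechanical: only the call sites
change, and the surrounding locking stays as it was. As an illustration,
here is a sketch of the complete fault handler after this patch, modeled
on the ext2_dax_fault() hunk above; the lines falling outside the hunk
context are reconstructed from the v4.9-era ext2 source and may differ
in detail.

static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct ext2_inode_info *ei = EXT2_I(inode);
	int ret;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}
	/* dax_sem serializes page faults against truncate */
	down_read(&ei->dax_sem);

	ret = dax_iomap_fault(vma, vmf, &ext2_iomap_ops);

	up_read(&ei->dax_sem);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);
	return ret;
}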
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -344,7 +344,7 @@ xfs_file_dax_read(
 		return 0; /* skip atime */
 
 	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-	ret = iomap_dax_rw(iocb, to, &xfs_iomap_ops);
+	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
 	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 
 	file_accessed(iocb->ki_filp);
@@ -691,7 +691,7 @@ xfs_file_dax_write(
 
 	trace_xfs_file_dax_write(ip, count, pos);
 
-	ret = iomap_dax_rw(iocb, from, &xfs_iomap_ops);
+	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
 	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 		i_size_write(inode, iocb->ki_pos);
 		error = xfs_setfilesize(ip, pos, ret);
@@ -1640,7 +1640,7 @@ xfs_filemap_page_mkwrite(
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (IS_DAX(inode)) {
-		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
+		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
 	} else {
 		ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
 		ret = block_page_mkwrite_return(ret);
@@ -1674,7 +1674,7 @@ xfs_filemap_fault(
 		 * changes to xfs_get_blocks_direct() to map unwritten extent
 		 * ioend for conversion on read-only mappings.
 		 */
-		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
+		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
 	} else
 		ret = filemap_fault(vma, vmf);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
diff --git a/include/linux/dax.h b/include/linux/dax.h
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -11,13 +11,13 @@ struct iomap_ops;
 /* We use lowest available exceptional entry bit for locking */
 #define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
 
-ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		struct iomap_ops *ops);
 ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
 		get_block_t, dio_iodone_t, int flags);
 int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
 int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		struct iomap_ops *ops);
 int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
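For context on the struct iomap_ops parameter threaded through both
renamed prototypes: it is the per-filesystem mapping interface
(ext2_iomap_ops and xfs_iomap_ops in the hunks above). In the kernel
snapshot this patch targets, include/linux/iomap.h defines it roughly as
sketched below; this is shown for orientation only and is not part of
this patch, so see that header for the authoritative definition.

struct iomap_ops {
	/*
	 * Map the byte range starting at pos: fill *iomap with the
	 * extent that covers it, allocating blocks when IOMAP_WRITE
	 * is set in flags.
	 */
	int	(*iomap_begin)(struct inode *inode, loff_t pos,
			loff_t length, unsigned flags,
			struct iomap *iomap);

	/*
	 * Optional: commit and/or unreserve space previously set up by
	 * iomap_begin(); written is the number of bytes transferred.
	 */
	int	(*iomap_end)(struct inode *inode, loff_t pos,
			loff_t length, ssize_t written, unsigned flags,
			struct iomap *iomap);
};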