/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_shared.h"
#include "xfs_bit.h"
#include "xfs_pnfs.h"

/*
 * Ensure that we do not have any outstanding pNFS layouts that can be used by
 * clients to directly read from or write to this inode.  This must be called
 * before every operation that can remove blocks from the extent map.
 * Additionally we call it during the write operation, where we aren't
 * concerned about exposing unallocated blocks but just want to provide basic
 * synchronization between a local writer and pNFS clients.  mmap writes would
 * also benefit from this sort of synchronization, but due to the tricky
 * locking rules in the page fault path we don't bother.
 */
int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock)
{
	struct xfs_inode	*ip = XFS_I(inode);
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
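
	/*
	 * Try a non-blocking recall first.  If a layout is still outstanding
	 * we have to drop the IOLOCK before waiting for the recall to
	 * complete, and can then only retake it in exclusive mode; *iolock
	 * is updated so the caller knows which mode it now holds.
	 */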
	while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
		xfs_iunlock(ip, *iolock);
		error = break_layout(inode, true);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
	}

	return error;
}

/*
 * Get a unique ID including its location so that the client can identify
 * the exported device.
 */
int
xfs_fs_get_uuid(
	struct super_block	*sb,
	u8			*buf,
	u32			*len,
	u64			*offset)
{
	struct xfs_mount	*mp = XFS_M(sb);

	printk_once(KERN_NOTICE
"XFS (%s): using experimental pNFS feature, use at your own risk!\n",
		mp->m_fsname);

	if (*len < sizeof(uuid_t))
		return -EINVAL;
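
	/*
	 * Return the superblock UUID as the identifier and the byte offset
	 * at which it sits in the on-disk superblock as the location.
	 */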
	memcpy(buf, &mp->m_sb.sb_uuid, sizeof(uuid_t));
	*len = sizeof(uuid_t);
	*offset = offsetof(struct xfs_dsb, sb_uuid);
	return 0;
}

/*
 * Get a layout for the pNFS client.
 */
int
xfs_fs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	u64			length,
	struct iomap		*iomap,
	bool			write,
	u32			*device_generation)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	loff_t			limit;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;
	uint			lock_flags;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * We can't export inodes residing on the realtime device.  The
	 * realtime device doesn't have a UUID to identify it, so the client
	 * has no way to find it.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		return -ENXIO;

	/*
	 * The pNFS block layout spec actually supports reflink like
	 * functionality, but the Linux pNFS server doesn't implement it yet.
	 */
	if (xfs_is_reflink_inode(ip))
		return -ENXIO;

	/*
	 * Lock out any other I/O before we flush and invalidate the
	 * pagecache, and then hand out a layout to the remote system.  This
	 * is very similar to direct I/O, except that the synchronization is
	 * much more complicated.  See the comment near xfs_break_layouts
	 * for a detailed explanation.
	 */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);
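
	/*
	 * Validate the requested range against the limit and clamp the
	 * length so that offset + length never extends past it.
	 */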
	error = -EINVAL;
	limit = mp->m_super->s_maxbytes;
	if (!write)
		limit = max(limit, round_up(i_size_read(inode),
				     inode->i_sb->s_blocksize));
	if (offset > limit)
		goto out_unlock;
	if (offset > limit - length)
		length = limit - offset;
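
	/*
	 * Write back dirty pagecache data and drop the cached pages, as the
	 * client will perform I/O directly against the block device.
	 */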
	error = filemap_write_and_wait(inode->i_mapping);
	if (error)
		goto out_unlock;
	error = invalidate_inode_pages2(inode->i_mapping);
	if (WARN_ON_ONCE(error))
		goto out_unlock;
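
	/*
	 * Convert the byte range to filesystem blocks; the start is rounded
	 * down to a block boundary and the end rounded up so that the
	 * mapping covers the entire requested range.
	 */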
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	lock_flags = xfs_ilock_data_map_shared(ip);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
			       &imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, lock_flags);

	if (error)
		goto out_unlock;

	if (write) {
		enum xfs_prealloc_flags	flags = 0;
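
		/*
		 * The pagecache was flushed above, so no delayed allocation
		 * extents can cover this range anymore.
		 */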
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);

		if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * xfs_iomap_write_direct() expects to take ownership
			 * of the shared ilock.
			 */
			xfs_ilock(ip, XFS_ILOCK_SHARED);
			error = xfs_iomap_write_direct(ip, offset, length,
						       &imap, nimaps);
			if (error)
				goto out_unlock;

			/*
			 * Ensure the next transaction is committed
			 * synchronously so that the blocks allocated and
			 * handed out to the client are guaranteed to be
			 * present even after a server crash.
			 */
			flags |= XFS_PREALLOC_SET | XFS_PREALLOC_SYNC;
		}

		error = xfs_update_prealloc_flags(ip, flags);
		if (error)
			goto out_unlock;
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	xfs_bmbt_to_iomap(ip, iomap, &imap);
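	/*
	 * Also hand back the device generation; it is bumped when the
	 * filesystem geometry changes (e.g. on growfs), which tells the
	 * client to revalidate the device information.
	 */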
	*device_generation = mp->m_generation;
	return error;

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}

/*
 * Ensure the size update falls into a valid allocated block.
 */
static int
xfs_pnfs_validate_isize(
	struct xfs_inode	*ip,
	xfs_off_t		isize)
{
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	int			error = 0;
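
	/*
	 * Look up the mapping for the block that contains the last byte of
	 * the new size; it must be a real, written extent.
	 */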
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1,
			       &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (error)
		return error;

	if (imap.br_startblock == HOLESTARTBLOCK ||
	    imap.br_startblock == DELAYSTARTBLOCK ||
	    imap.br_state == XFS_EXT_UNWRITTEN)
		return -EIO;
	return 0;
}

/*
 * Make sure the blocks described by maps are stable on disk.  This includes
 * converting any unwritten extents, flushing the disk cache and updating the
 * time stamps.
 *
 * Note that we rely on the caller to always send us a timestamp update so
 * that we always commit a transaction here.  If that stops being true we
 * will have to manually flush the cache here similar to what the fsync code
 * path does for datasyncs on files that have no dirty metadata.
 */
int
xfs_fs_commit_blocks(
	struct inode		*inode,
	struct iomap		*maps,
	int			nr_maps,
	struct iattr		*iattr)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	bool			update_isize = false;
	int			error, i;
	loff_t			size;

	ASSERT(iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME));

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	size = i_size_read(inode);
	if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size > size) {
		update_isize = true;
		size = iattr->ia_size;
	}

	for (i = 0; i < nr_maps; i++) {
		u64		start, length, end;
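
		/*
		 * Clamp each map to the (possibly updated) file size and
		 * skip any map that lies entirely beyond it.
		 */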
		start = maps[i].offset;
		if (start > size)
			continue;

		end = start + maps[i].length;
		if (end > size)
			end = size;

		length = end - start;
		if (!length)
			continue;

		/*
		 * Make sure reads through the pagecache see the new data.
		 */
		error = invalidate_inode_pages2_range(inode->i_mapping,
					start >> PAGE_SHIFT,
					(end - 1) >> PAGE_SHIFT);
		WARN_ON_ONCE(error);
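
		/*
		 * Convert any unwritten extents covering the committed
		 * range; until converted they would read back as zeroes.
		 */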
		error = xfs_iomap_write_unwritten(ip, start, length);
		if (error)
			goto out_drop_iolock;
	}

	if (update_isize) {
		error = xfs_pnfs_validate_isize(ip, size);
		if (error)
			goto out_drop_iolock;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_drop_iolock;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	xfs_setattr_time(ip, iattr);
	if (update_isize) {
		i_size_write(inode, iattr->ia_size);
		ip->i_d.di_size = iattr->ia_size;
	}
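
	/*
	 * Commit synchronously so that the size and timestamp updates are on
	 * stable storage before we report the commit as done.
	 */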
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);

out_drop_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}