Merge branch 'fscache-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs
commit 045fb9c2f5

Pull fscache cleanups from David Howells:

 - fix checker complaint in afs

 - two netfs cleanups:

     - netfs_inode calling convention cleanup plus the requisite
       documentation changes

     - replace the ->cleanup op with a ->free_request op.  This is possible
       as the I/O request is now always available at the cleanup point as
       the stuff to be cleaned up is no longer passed into the API
       functions, but rather obtained by ->init_request.

* 'fscache-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  netfs: Rename the netfs_io_request cleanup op and give it an op pointer
  netfs: Further cleanups after struct netfs_inode wrapper introduced
  afs: Fix some checker issues

@@ -79,7 +79,7 @@ To help deal with the per-inode context, a number helper functions are
 provided. Firstly, a function to perform basic initialisation on a context and
 set the operations table pointer::
 
-        void netfs_inode_init(struct inode *inode,
+        void netfs_inode_init(struct netfs_inode *ctx,
                               const struct netfs_request_ops *ops);
 
 then a function to cast from the VFS inode structure to the netfs context::
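For context, the new calling convention assumes the network filesystem embeds struct netfs_inode (which itself wraps the VFS inode) at the start of its own inode structure and passes that wrapper in directly, rather than the bare struct inode. A minimal sketch of the pattern; the myfs_inode and myfs_req_ops names are illustrative and not part of this series:

#include <linux/netfs.h>

/* Hypothetical filesystem inode: the netfs context (which embeds the VFS
 * inode) comes first so that inode-to-wrapper casts line up. */
struct myfs_inode {
	struct netfs_inode netfs;
	/* ... filesystem-private fields ... */
};

/* A real table would at least set ->issue_read; left empty for brevity. */
static const struct netfs_request_ops myfs_req_ops;

static void myfs_set_netfs_context(struct inode *inode)
{
	struct myfs_inode *mi = container_of(inode, struct myfs_inode, netfs.inode);

	/* Pass the netfs_inode wrapper, not the bare VFS inode. */
	netfs_inode_init(&mi->netfs, &myfs_req_ops);
}
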
@@ -89,7 +89,7 @@ then a function to cast from the VFS inode structure to the netfs context::
 and finally, a function to get the cache cookie pointer from the context
 attached to an inode (or NULL if fscache is disabled)::
 
-        struct fscache_cookie *netfs_i_cookie(struct inode *inode);
+        struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx);
 
 
 Buffered Read Helpers
@@ -136,8 +136,9 @@ Three read helpers are provided::
 
         void netfs_readahead(struct readahead_control *ractl);
         int netfs_read_folio(struct file *file,
-                            struct folio *folio);
-        int netfs_write_begin(struct file *file,
+                             struct folio *folio);
+        int netfs_write_begin(struct netfs_inode *ctx,
+                              struct file *file,
                               struct address_space *mapping,
                               loff_t pos,
                               unsigned int len,
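With the extra leading argument, callers supply the netfs context themselves instead of the helper deriving it from the file. A hedged sketch of an address_space write_begin implementation under that convention (the myfs_* names are illustrative, not from this series):

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned int len,
			    struct page **pagep, void **fsdata)
{
	struct myfs_inode *mi = container_of(file_inode(file),
					     struct myfs_inode, netfs.inode);
	struct folio *folio;
	int ret;

	/* The context goes in explicitly; netfs_write_begin() no longer
	 * looks it up via netfs_inode(file_inode(file)) internally. */
	ret = netfs_write_begin(&mi->netfs, file, mapping, pos, len, &folio, fsdata);
	if (ret < 0)
		return ret;

	*pagep = &folio->page;
	return 0;
}
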
@@ -157,9 +158,10 @@ The helpers manage the read request, calling back into the network filesystem
 through the suppplied table of operations. Waits will be performed as
 necessary before returning for helpers that are meant to be synchronous.
 
-If an error occurs and netfs_priv is non-NULL, ops->cleanup() will be called to
-deal with it. If some parts of the request are in progress when an error
-occurs, the request will get partially completed if sufficient data is read.
+If an error occurs, the ->free_request() will be called to clean up the
+netfs_io_request struct allocated. If some parts of the request are in
+progress when an error occurs, the request will get partially completed if
+sufficient data is read.
 
 Additionally, there is::
 
@@ -207,8 +209,7 @@ The above fields are the ones the netfs can use. They are:
  * ``netfs_priv``
 
    The network filesystem's private data. The value for this can be passed in
-   to the helper functions or set during the request. The ->cleanup() op will
-   be called if this is non-NULL at the end.
+   to the helper functions or set during the request.
 
  * ``start``
  * ``len``
@@ -293,6 +294,7 @@ through which it can issue requests and negotiate::
 
         struct netfs_request_ops {
                 void (*init_request)(struct netfs_io_request *rreq, struct file *file);
+                void (*free_request)(struct netfs_io_request *rreq);
                 int (*begin_cache_operation)(struct netfs_io_request *rreq);
                 void (*expand_readahead)(struct netfs_io_request *rreq);
                 bool (*clamp_length)(struct netfs_io_subrequest *subreq);
@@ -301,7 +303,6 @@ through which it can issue requests and negotiate::
                 int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
                                          struct folio *folio, void **_fsdata);
                 void (*done)(struct netfs_io_request *rreq);
-                void (*cleanup)(struct address_space *mapping, void *netfs_priv);
         };
 
 The operations are as follows:
@@ -309,7 +310,12 @@ The operations are as follows:
  * ``init_request()``
 
    [Optional] This is called to initialise the request structure. It is given
-   the file for reference and can modify the ->netfs_priv value.
+   the file for reference.
+
+ * ``free_request()``
+
+   [Optional] This is called as the request is being deallocated so that the
+   filesystem can clean up any state it has attached there.
 
  * ``begin_cache_operation()``
 
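Put side by side, the op change looks roughly like this for a filesystem that only stashes an allocation in ->netfs_priv (hypothetical myfs names; neither function is part of this series):

/* Old op: only the mapping and the private pointer were passed in, and it
 * was only invoked when ->netfs_priv was non-NULL. */
static void myfs_req_cleanup(struct address_space *mapping, void *netfs_priv)
{
	kfree(netfs_priv);
}

/* New op: the whole netfs_io_request is available, and it is invoked
 * whenever the op is set, so other per-request state can be released too. */
static void myfs_free_request(struct netfs_io_request *rreq)
{
	kfree(rreq->netfs_priv);
}
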
@@ -383,11 +389,6 @@ The operations are as follows:
    [Optional] This is called after the folios in the request have all been
    unlocked (and marked uptodate if applicable).
 
- * ``cleanup``
-
-   [Optional] This is called as the request is being deallocated so that the
-   filesystem can clean up ->netfs_priv.
-
 
 
 Read Helper Procedure
@@ -124,7 +124,7 @@ static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
 static inline struct fscache_cookie *v9fs_inode_cookie(struct v9fs_inode *v9inode)
 {
 #ifdef CONFIG_9P_FSCACHE
-        return netfs_i_cookie(&v9inode->netfs.inode);
+        return netfs_i_cookie(&v9inode->netfs);
 #else
         return NULL;
 #endif
@@ -66,13 +66,12 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 }
 
 /**
- * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_request
- * @mapping: unused mapping of request to cleanup
- * @priv: private data to cleanup, a fid, guaranted non-null.
+ * v9fs_free_request - Cleanup request initialized by v9fs_init_rreq
+ * @rreq: The I/O request to clean up
  */
-static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
+static void v9fs_free_request(struct netfs_io_request *rreq)
 {
-        struct p9_fid *fid = priv;
+        struct p9_fid *fid = rreq->netfs_priv;
 
         p9_client_clunk(fid);
 }
@@ -94,9 +93,9 @@ static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
 
 const struct netfs_request_ops v9fs_req_ops = {
         .init_request = v9fs_init_request,
+        .free_request = v9fs_free_request,
         .begin_cache_operation = v9fs_begin_cache_operation,
         .issue_read = v9fs_issue_read,
-        .cleanup = v9fs_req_cleanup,
 };
 
 /**
@@ -274,7 +273,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
          * file. We need to do this before we get a lock on the page in case
          * there's more than one writer competing for the same cache block.
          */
-        retval = netfs_write_begin(filp, mapping, pos, len, &folio, fsdata);
+        retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata);
         if (retval < 0)
                 return retval;
 
@@ -252,7 +252,8 @@ void v9fs_free_inode(struct inode *inode)
  */
 static void v9fs_set_netfs_context(struct inode *inode)
 {
-        netfs_inode_init(inode, &v9fs_req_ops);
+        struct v9fs_inode *v9inode = V9FS_I(inode);
+        netfs_inode_init(&v9inode->netfs, &v9fs_req_ops);
 }
 
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
|
@ -76,7 +76,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
|
|||
/* there shouldn't be an existing inode */
|
||||
BUG_ON(!(inode->i_state & I_NEW));
|
||||
|
||||
netfs_inode_init(inode, NULL);
|
||||
netfs_inode_init(&vnode->netfs, NULL);
|
||||
inode->i_size = 0;
|
||||
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
|
||||
if (root) {
|
||||
|
|
|
@@ -382,17 +382,17 @@ static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
         return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0;
 }
 
-static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv)
+static void afs_free_request(struct netfs_io_request *rreq)
 {
-        key_put(netfs_priv);
+        key_put(rreq->netfs_priv);
 }
 
 const struct netfs_request_ops afs_req_ops = {
         .init_request = afs_init_request,
+        .free_request = afs_free_request,
         .begin_cache_operation = afs_begin_cache_operation,
         .check_write_begin = afs_check_write_begin,
         .issue_read = afs_issue_read,
-        .cleanup = afs_priv_cleanup,
 };
 
 int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -58,7 +58,7 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
  */
 static void afs_set_netfs_context(struct afs_vnode *vnode)
 {
-        netfs_inode_init(&vnode->netfs.inode, &afs_req_ops);
+        netfs_inode_init(&vnode->netfs, &afs_req_ops);
 }
 
 /*
@@ -670,7 +670,7 @@ struct afs_vnode {
 static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode)
 {
 #ifdef CONFIG_AFS_FSCACHE
-        return netfs_i_cookie(&vnode->netfs.inode);
+        return netfs_i_cookie(&vnode->netfs);
 #else
         return NULL;
 #endif
@@ -9,8 +9,7 @@
 #include <linux/slab.h>
 #include "internal.h"
 
-unsigned __read_mostly afs_volume_gc_delay = 10;
-unsigned __read_mostly afs_volume_record_life = 60 * 60;
+static unsigned __read_mostly afs_volume_record_life = 60 * 60;
 
 /*
  * Insert a volume into a cell. If there's an existing volume record, that is
@@ -60,7 +60,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
          * file. We need to do this before we get a lock on the page in case
          * there's more than one writer competing for the same cache block.
          */
-        ret = netfs_write_begin(file, mapping, pos, len, &folio, fsdata);
+        ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata);
         if (ret < 0)
                 return ret;
 
@@ -394,11 +394,10 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
         return 0;
 }
 
-static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
+static void ceph_netfs_free_request(struct netfs_io_request *rreq)
 {
-        struct inode *inode = mapping->host;
-        struct ceph_inode_info *ci = ceph_inode(inode);
-        int got = (uintptr_t)priv;
+        struct ceph_inode_info *ci = ceph_inode(rreq->inode);
+        int got = (uintptr_t)rreq->netfs_priv;
 
         if (got)
                 ceph_put_cap_refs(ci, got);
@@ -406,12 +405,12 @@ static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
 
 const struct netfs_request_ops ceph_netfs_ops = {
         .init_request = ceph_init_request,
+        .free_request = ceph_netfs_free_request,
         .begin_cache_operation = ceph_begin_cache_operation,
         .issue_read = ceph_netfs_issue_read,
         .expand_readahead = ceph_netfs_expand_readahead,
         .clamp_length = ceph_netfs_clamp_length,
         .check_write_begin = ceph_netfs_check_write_begin,
-        .cleanup = ceph_readahead_cleanup,
 };
 
 #ifdef CONFIG_CEPH_FSCACHE
@@ -1322,10 +1321,11 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
                             struct page **pagep, void **fsdata)
 {
         struct inode *inode = file_inode(file);
+        struct ceph_inode_info *ci = ceph_inode(inode);
         struct folio *folio = NULL;
         int r;
 
-        r = netfs_write_begin(file, inode->i_mapping, pos, len, &folio, NULL);
+        r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL);
         if (r == 0)
                 folio_wait_fscache(folio);
         if (r < 0) {
@@ -28,7 +28,7 @@ void ceph_fscache_invalidate(struct inode *inode, bool dio_write);
 
 static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci)
 {
-        return netfs_i_cookie(&ci->netfs.inode);
+        return netfs_i_cookie(&ci->netfs);
 }
 
 static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
@@ -460,7 +460,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
         dout("alloc_inode %p\n", &ci->netfs.inode);
 
         /* Set parameters for the netfs library */
-        netfs_inode_init(&ci->netfs.inode, &ceph_netfs_ops);
+        netfs_inode_init(&ci->netfs, &ceph_netfs_ops);
 
         spin_lock_init(&ci->i_ceph_lock);
 
@@ -61,7 +61,7 @@ void cifs_fscache_fill_coherency(struct inode *inode,
 
 static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode)
 {
-        return netfs_i_cookie(inode);
+        return netfs_i_cookie(&CIFS_I(inode)->netfs);
 }
 
 static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags)
@@ -297,6 +297,7 @@ zero_out:
 
 /**
  * netfs_write_begin - Helper to prepare for writing
+ * @ctx: The netfs context
  * @file: The file to read from
  * @mapping: The mapping to read from
  * @pos: File position at which the write will begin
@@ -326,12 +327,12 @@ zero_out:
  *
  * This is usable whether or not caching is enabled.
  */
-int netfs_write_begin(struct file *file, struct address_space *mapping,
+int netfs_write_begin(struct netfs_inode *ctx,
+                      struct file *file, struct address_space *mapping,
                       loff_t pos, unsigned int len, struct folio **_folio,
                       void **_fsdata)
 {
         struct netfs_io_request *rreq;
-        struct netfs_inode *ctx = netfs_inode(file_inode(file ));
         struct folio *folio;
         unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
         pgoff_t index = pos >> PAGE_SHIFT;
@@ -75,10 +75,10 @@ static void netfs_free_request(struct work_struct *work)
         struct netfs_io_request *rreq =
                 container_of(work, struct netfs_io_request, work);
 
-        netfs_clear_subrequests(rreq, false);
-        if (rreq->netfs_priv)
-                rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
         trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+        netfs_clear_subrequests(rreq, false);
+        if (rreq->netfs_ops->free_request)
+                rreq->netfs_ops->free_request(rreq);
         if (rreq->cache_resources.ops)
                 rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
         kfree(rreq);
@@ -206,7 +206,9 @@ struct netfs_io_request {
  */
 struct netfs_request_ops {
         int (*init_request)(struct netfs_io_request *rreq, struct file *file);
+        void (*free_request)(struct netfs_io_request *rreq);
         int (*begin_cache_operation)(struct netfs_io_request *rreq);
+
         void (*expand_readahead)(struct netfs_io_request *rreq);
         bool (*clamp_length)(struct netfs_io_subrequest *subreq);
         void (*issue_read)(struct netfs_io_subrequest *subreq);
@@ -214,7 +216,6 @@ struct netfs_request_ops {
         int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
                                  struct folio *folio, void **_fsdata);
         void (*done)(struct netfs_io_request *rreq);
-        void (*cleanup)(struct address_space *mapping, void *netfs_priv);
 };
 
 /*
@@ -277,7 +278,8 @@ struct netfs_cache_ops {
 struct readahead_control;
 extern void netfs_readahead(struct readahead_control *);
 int netfs_read_folio(struct file *, struct folio *);
-extern int netfs_write_begin(struct file *, struct address_space *,
+extern int netfs_write_begin(struct netfs_inode *,
+                             struct file *, struct address_space *,
                              loff_t, unsigned int, struct folio **,
                              void **);
 
@@ -302,19 +304,17 @@ static inline struct netfs_inode *netfs_inode(struct inode *inode)
 
 /**
  * netfs_inode_init - Initialise a netfslib inode context
- * @inode: The inode with which the context is associated
+ * @ctx: The netfs inode to initialise
  * @ops: The netfs's operations list
  *
  * Initialise the netfs library context struct. This is expected to follow on
  * directly from the VFS inode struct.
  */
-static inline void netfs_inode_init(struct inode *inode,
+static inline void netfs_inode_init(struct netfs_inode *ctx,
                                     const struct netfs_request_ops *ops)
 {
-        struct netfs_inode *ctx = netfs_inode(inode);
-
         ctx->ops = ops;
-        ctx->remote_i_size = i_size_read(inode);
+        ctx->remote_i_size = i_size_read(&ctx->inode);
 #if IS_ENABLED(CONFIG_FSCACHE)
         ctx->cache = NULL;
 #endif
@@ -322,28 +322,25 @@ static inline void netfs_inode_init(struct inode *inode,
 
 /**
  * netfs_resize_file - Note that a file got resized
- * @inode: The inode being resized
+ * @ctx: The netfs inode being resized
  * @new_i_size: The new file size
  *
  * Inform the netfs lib that a file got resized so that it can adjust its state.
  */
-static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size)
+static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size)
 {
-        struct netfs_inode *ctx = netfs_inode(inode);
-
         ctx->remote_i_size = new_i_size;
 }
 
 /**
  * netfs_i_cookie - Get the cache cookie from the inode
- * @inode: The inode to query
+ * @ctx: The netfs inode to query
  *
  * Get the caching cookie (if enabled) from the network filesystem's inode.
  */
-static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode)
+static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
 {
 #if IS_ENABLED(CONFIG_FSCACHE)
-        struct netfs_inode *ctx = netfs_inode(inode);
         return ctx->cache;
 #else
         return NULL;
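The inline helpers now take the netfs_inode wrapper as well, so callers pass the embedded context rather than the VFS inode. A small illustrative caller of netfs_resize_file() under that assumption (the myfs names remain hypothetical):

static void myfs_truncate(struct inode *inode, loff_t new_size)
{
	struct myfs_inode *mi = container_of(inode, struct myfs_inode, netfs.inode);

	truncate_setsize(inode, new_size);
	/* Tell netfslib the remote file size changed, via the wrapper. */
	netfs_resize_file(&mi->netfs, new_size);
}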