nfs: call nfs_can_coalesce_requests for every req

Call nfs_can_coalesce_requests for every request, even the first one.
This is needed for future patches to give pg_test a way to inform
add_request to reduce the size of the request.

Now @prev can be NULL in nfs_can_coalesce_requests and in the pg_test functions.

Signed-off-by: Weston Andros Adamson <dros@primarydata.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Author:    Weston Andros Adamson <dros@primarydata.com>
Date:      2014-05-15 11:56:44 -04:00
Committer: Trond Myklebust
Commit:    ab75e41719 (parent b4fdac1a51)
2 changed files with 22 additions and 15 deletions
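
With this change a pg_test implementation may now be called with @prev == NULL
for the first request added to a descriptor. The sketch below is not part of
this commit; it only illustrates the shape such a callback could take once
later patches teach add_request to honour a reduced size. "example_pg_test"
and "EXAMPLE_MAX_IO_SIZE" are made-up names used purely for illustration.

        /*
         * Illustrative sketch only -- not from this series. Assumes a
         * hypothetical per-driver limit EXAMPLE_MAX_IO_SIZE. At this point
         * in the series the generic code still warns if pg_test returns
         * anything other than 0 or req->wb_bytes, so trimming the size
         * only becomes useful with the follow-up patches.
         */
        static size_t example_pg_test(struct nfs_pageio_descriptor *pgio,
                                      struct nfs_page *prev, struct nfs_page *req)
        {
                /* First request in the descriptor: nothing to coalesce with,
                 * but the driver may still cap the request size. */
                if (!prev)
                        return min_t(size_t, req->wb_bytes, EXAMPLE_MAX_IO_SIZE);

                /* Subsequent requests: defer to the generic contiguity
                 * checks, which now also tolerate a NULL @prev. */
                return nfs_generic_pg_test(pgio, prev, req);
        }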

@@ -929,6 +929,9 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
             !nfs_generic_pg_test(pgio, prev, req))
                 return 0;
 
+        if (!prev)
+                return req->wb_bytes;
+
         p_stripe = (u64)req_offset(prev);
         r_stripe = (u64)req_offset(req);
         stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;

@@ -292,6 +292,8 @@ nfs_wait_on_request(struct nfs_page *req)
 size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
                            struct nfs_page *prev, struct nfs_page *req)
 {
+        if (!prev)
+                return req->wb_bytes;
         /*
          * FIXME: ideally we should be able to coalesce all requests
          * that are not block boundary aligned, but currently this
@@ -761,17 +763,20 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
 {
         size_t size;
 
-        if (!nfs_match_open_context(req->wb_context, prev->wb_context))
-                return false;
-        if (req->wb_context->dentry->d_inode->i_flock != NULL &&
-            !nfs_match_lock_context(req->wb_lock_context, prev->wb_lock_context))
-                return false;
-        if (req->wb_pgbase != 0)
-                return false;
-        if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
-                return false;
-        if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
-                return false;
+        if (prev) {
+                if (!nfs_match_open_context(req->wb_context, prev->wb_context))
+                        return false;
+                if (req->wb_context->dentry->d_inode->i_flock != NULL &&
+                    !nfs_match_lock_context(req->wb_lock_context,
+                                            prev->wb_lock_context))
+                        return false;
+                if (req->wb_pgbase != 0)
+                        return false;
+                if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
+                        return false;
+                if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
+                        return false;
+        }
         size = pgio->pg_ops->pg_test(pgio, prev, req);
         WARN_ON_ONCE(size && size != req->wb_bytes);
         return size > 0;
@@ -788,17 +793,16 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
 static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
                                      struct nfs_page *req)
 {
+        struct nfs_page *prev = NULL;
         if (desc->pg_count != 0) {
-                struct nfs_page *prev;
-
                 prev = nfs_list_entry(desc->pg_list.prev);
-                if (!nfs_can_coalesce_requests(prev, req, desc))
-                        return 0;
         } else {
                 if (desc->pg_ops->pg_init)
                         desc->pg_ops->pg_init(desc, req);
                 desc->pg_base = req->wb_pgbase;
         }
+        if (!nfs_can_coalesce_requests(prev, req, desc))
+                return 0;
         nfs_list_remove_request(req);
         nfs_list_add_request(req, &desc->pg_list);
         desc->pg_count += req->wb_bytes;