fuse: allocate page array more efficiently

When allocating the page array for a request, the array of page pointers
and the array of page descriptors are allocated by two separate kmalloc()
calls.  Merge these into a single allocation.
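
For illustration only, a minimal userspace sketch of the same layout
trick, with calloc() standing in for kzalloc() and a stand-in descriptor
struct rather than the kernel's struct fuse_page_desc: both arrays come
from one zeroed allocation, and the descriptor array is carved out
directly after the pointer array.

#include <stdio.h>
#include <stdlib.h>

struct page_desc {              /* stand-in for struct fuse_page_desc */
        unsigned int length;
        unsigned int offset;
};

int main(void)
{
        unsigned npages = 8;
        void **pages;
        struct page_desc *descs;

        /* One zeroed allocation sized for both arrays. */
        pages = calloc(npages, sizeof(*pages) + sizeof(*descs));
        if (!pages)
                return 1;

        /* The descriptor array begins just past the pointer array. */
        descs = (struct page_desc *)(pages + npages);

        descs[0].length = 4096;
        printf("pages=%p descs=%p len=%u\n",
               (void *)pages, (void *)descs, descs[0].length);

        free(pages);            /* a single free releases both arrays */
        return 0;
}

Because the descriptors live inside the same block, only the base
pointer ever needs to be freed, which is what lets fuse_request_free()
below drop its second kfree().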

Also, instead of initializing the request and the page arrays with
memset(), use the zeroing allocation variants (kmem_cache_zalloc() and
kzalloc()).
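
In userspace terms, kzalloc() relates to kmalloc() + memset() roughly
the way calloc() relates to malloc() + memset(); kmem_cache_zalloc() is
the slab-cache equivalent. A trivial sketch of the before/after pattern:

#include <stdlib.h>
#include <string.h>

int main(void)
{
        size_t n = 16;

        /* Before: allocate, then zero by hand. */
        int *a = malloc(n * sizeof(*a));
        if (!a)
                return 1;
        memset(a, 0, n * sizeof(*a));

        /* After: the zeroing variant does both in one step. */
        int *b = calloc(n, sizeof(*b));
        if (!b) {
                free(a);
                return 1;
        }

        free(a);
        free(b);
        return 0;
}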

Reserved requests never carry pages (the page array size is zero). Make
that explicit by initializing the page array pointers to NULL, and make
sure the assumption remains true by adding a WARN_ON().
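
A hedged sketch of that invariant, with stand-in types rather than the
kernel API: a reserved request must report a zero-sized page array, so
it can be reinitialized with NULL pointers. The assert() here mirrors
the WARN_ON() added in the patch below.

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct req {                    /* stand-in for struct fuse_req */
        void **pages;
        unsigned max_pages;
};

/* Mirrors the shape of put_reserved_req(): the request carries no
 * pages, so resetting it with a NULL page array is safe. */
static void put_reserved(struct req *r)
{
        assert(r->max_pages == 0);      /* the WARN_ON() in the patch */
        memset(r, 0, sizeof(*r));
        r->pages = NULL;        /* explicit: reserved reqs have no pages */
}

int main(void)
{
        struct req r = { .pages = NULL, .max_pages = 0 };
        put_reserved(&r);
        return 0;
}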

Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
commit 8a7aa286ab (parent ab2257e994)
Author: Miklos Szeredi <mszeredi@redhat.com>
Date:   2018-10-01 10:07:05 +02:00

1 changed file with 17 additions and 25 deletions

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c

@@ -44,9 +44,6 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages,
 			      struct fuse_page_desc *page_descs,
 			      unsigned npages)
 {
-	memset(req, 0, sizeof(*req));
-	memset(pages, 0, sizeof(*pages) * npages);
-	memset(page_descs, 0, sizeof(*page_descs) * npages);
 	INIT_LIST_HEAD(&req->list);
 	INIT_LIST_HEAD(&req->intr_entry);
 	init_waitqueue_head(&req->waitq);
@@ -59,29 +56,23 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages,
 
 static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
 {
-	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
+	struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
 	if (req) {
-		struct page **pages;
-		struct fuse_page_desc *page_descs;
-
-		if (npages <= FUSE_REQ_INLINE_PAGES) {
-			pages = req->inline_pages;
-			page_descs = req->inline_page_descs;
-		} else {
-			pages = kmalloc_array(npages, sizeof(struct page *),
-					      flags);
-			page_descs =
-				kmalloc_array(npages,
-					      sizeof(struct fuse_page_desc),
-					      flags);
-		}
-
-		if (!pages || !page_descs) {
-			kfree(pages);
-			kfree(page_descs);
-			kmem_cache_free(fuse_req_cachep, req);
-			return NULL;
+		struct page **pages = NULL;
+		struct fuse_page_desc *page_descs = NULL;
+
+		if (npages > FUSE_REQ_INLINE_PAGES) {
+			pages = kzalloc(npages * (sizeof(*pages) +
+						  sizeof(*page_descs)), flags);
+			if (!pages) {
+				kmem_cache_free(fuse_req_cachep, req);
+				return NULL;
+			}
+			page_descs = (void *) pages + npages * sizeof(*pages);
+		} else if (npages) {
+			pages = req->inline_pages;
+			page_descs = req->inline_page_descs;
 		}
 
 		fuse_request_init(req, pages, page_descs, npages);
 	}
@@ -101,10 +92,9 @@ struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
 
 void fuse_request_free(struct fuse_req *req)
 {
-	if (req->pages != req->inline_pages) {
+	if (req->pages != req->inline_pages)
 		kfree(req->pages);
-		kfree(req->page_descs);
-	}
 	kmem_cache_free(fuse_req_cachep, req);
 }
@@ -239,8 +229,10 @@ static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
 	struct file *file = req->stolen_file;
 	struct fuse_file *ff = file->private_data;
 
+	WARN_ON(req->max_pages);
 	spin_lock(&fc->lock);
-	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
+	memset(req, 0, sizeof(*req));
+	fuse_request_init(req, NULL, NULL, 0);
 	BUG_ON(ff->reserved_req);
 	ff->reserved_req = req;
 	wake_up_all(&fc->reserved_req_waitq);