sunrpc: clean-up cache downcall

We can simplify the code around cache_downcall() by unifying memory
allocations with kvmalloc(). This has the benefit of getting rid of
cache_slow_downcall() (and queue_io_mutex), and it also matches the
userland allocation size and limits.

Signed-off-by: Roberto Bergantinos Corpas <rbergant@redhat.com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Roberto Bergantinos Corpas, 2020-11-27 19:38:31 +01:00; committed by Chuck Lever
parent 4420440c57
commit 4b5cff7ed8
1 changed file with 11 additions and 30 deletions

net/sunrpc/cache.c

@@ -778,7 +778,6 @@ void cache_clean_deferred(void *owner)
  */
 
 static DEFINE_SPINLOCK(queue_lock);
-static DEFINE_MUTEX(queue_io_mutex);
 
 struct cache_queue {
 	struct list_head	list;
@@ -906,44 +905,26 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
 	return ret;
 }
 
-static ssize_t cache_slow_downcall(const char __user *buf,
-				   size_t count, struct cache_detail *cd)
-{
-	static char write_buf[32768]; /* protected by queue_io_mutex */
-	ssize_t ret = -EINVAL;
-
-	if (count >= sizeof(write_buf))
-		goto out;
-	mutex_lock(&queue_io_mutex);
-	ret = cache_do_downcall(write_buf, buf, count, cd);
-	mutex_unlock(&queue_io_mutex);
-out:
-	return ret;
-}
-
 static ssize_t cache_downcall(struct address_space *mapping,
 			      const char __user *buf,
 			      size_t count, struct cache_detail *cd)
 {
-	struct page *page;
-	char *kaddr;
+	char *write_buf;
 	ssize_t ret = -ENOMEM;
 
-	if (count >= PAGE_SIZE)
-		goto out_slow;
+	if (count >= 32768) { /* 32k is max userland buffer, lets check anyway */
+		ret = -EINVAL;
+		goto out;
+	}
 
-	page = find_or_create_page(mapping, 0, GFP_KERNEL);
-	if (!page)
-		goto out_slow;
+	write_buf = kvmalloc(count + 1, GFP_KERNEL);
+	if (!write_buf)
+		goto out;
 
-	kaddr = kmap(page);
-	ret = cache_do_downcall(kaddr, buf, count, cd);
-	kunmap(page);
-	unlock_page(page);
-	put_page(page);
+	ret = cache_do_downcall(write_buf, buf, count, cd);
+	kvfree(write_buf);
+out:
 	return ret;
-out_slow:
-	return cache_slow_downcall(buf, count, cd);
 }
 
 static ssize_t cache_write(struct file *filp, const char __user *buf,
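
For readers who want the end state in one piece, here is cache_downcall() as it reads with this patch applied, assembled from the added and unchanged lines in the hunk above (whitespace approximate, not copied verbatim from the tree): a single kvmalloc()/kvfree() buffer replaces both the page-cache fast path and the mutex-serialized 32k slow path.

/* Resulting cache_downcall(): one buffer sized to the request, no
 * find_or_create_page()/kmap() fast path and no cache_slow_downcall(). */
static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	char *write_buf;
	ssize_t ret = -ENOMEM;

	if (count >= 32768) { /* 32k is max userland buffer, lets check anyway */
		ret = -EINVAL;
		goto out;
	}

	/* kvmalloc() falls back to vmalloc() for larger requests, so a
	 * single allocation covers every size up to the 32k limit. */
	write_buf = kvmalloc(count + 1, GFP_KERNEL);
	if (!write_buf)
		goto out;

	ret = cache_do_downcall(write_buf, buf, count, cd);
	kvfree(write_buf);
out:
	return ret;
}

Because the buffer is now per-call rather than a single static 32k array, concurrent writers no longer serialize on queue_io_mutex, which is why the mutex can be dropped entirely in the first hunk.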