Merge branch '9p-iov_iter' into for-next
commit c48722c636
@@ -68,14 +68,10 @@ int v9fs_file_open(struct inode *inode, struct file *file);
void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
int v9fs_uflags2omode(int uflags, int extended);

ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
ssize_t v9fs_fid_readn(struct p9_fid *, char *, char __user *, u32, u64);
void v9fs_blank_wstat(struct p9_wstat *wstat);
int v9fs_vfs_setattr_dotl(struct dentry *, struct iattr *);
int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
        int datasync);
ssize_t v9fs_file_write_internal(struct inode *, struct p9_fid *,
        const char __user *, size_t, loff_t *, int);
int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode);
int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode);
static inline void v9fs_invalidate_inode_attr(struct inode *inode)

@@ -51,12 +51,11 @@
 */
static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
{
    int retval;
    loff_t offset;
    char *buffer;
    struct inode *inode;
    struct inode *inode = page->mapping->host;
    struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
    struct iov_iter to;
    int retval, err;

    inode = page->mapping->host;
    p9_debug(P9_DEBUG_VFS, "\n");

    BUG_ON(!PageLocked(page));

@@ -65,16 +64,16 @@ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
    if (retval == 0)
        return retval;

    buffer = kmap(page);
    offset = page_offset(page);
    iov_iter_bvec(&to, ITER_BVEC | READ, &bvec, 1, PAGE_SIZE);

    retval = v9fs_fid_readn(fid, buffer, NULL, PAGE_CACHE_SIZE, offset);
    if (retval < 0) {
    retval = p9_client_read(fid, page_offset(page), &to, &err);
    if (err) {
        v9fs_uncache_page(inode, page);
        retval = err;
        goto done;
    }

    memset(buffer + retval, 0, PAGE_CACHE_SIZE - retval);
    zero_user(page, retval, PAGE_SIZE - retval);
    flush_dcache_page(page);
    SetPageUptodate(page);

@@ -82,7 +81,6 @@ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
    retval = 0;

done:
    kunmap(page);
    unlock_page(page);
    return retval;
}

@@ -161,41 +159,32 @@ static void v9fs_invalidate_page(struct page *page, unsigned int offset,

static int v9fs_vfs_writepage_locked(struct page *page)
{
    char *buffer;
    int retval, len;
    loff_t offset, size;
    mm_segment_t old_fs;
    struct v9fs_inode *v9inode;
    struct inode *inode = page->mapping->host;
    struct v9fs_inode *v9inode = V9FS_I(inode);
    loff_t size = i_size_read(inode);
    struct iov_iter from;
    struct bio_vec bvec;
    int err, len;

    v9inode = V9FS_I(inode);
    size = i_size_read(inode);
    if (page->index == size >> PAGE_CACHE_SHIFT)
        len = size & ~PAGE_CACHE_MASK;
    else
        len = PAGE_CACHE_SIZE;

    set_page_writeback(page);
    bvec.bv_page = page;
    bvec.bv_offset = 0;
    bvec.bv_len = len;
    iov_iter_bvec(&from, ITER_BVEC | WRITE, &bvec, 1, len);

    buffer = kmap(page);
    offset = page_offset(page);

    old_fs = get_fs();
    set_fs(get_ds());
    /* We should have writeback_fid always set */
    BUG_ON(!v9inode->writeback_fid);

    retval = v9fs_file_write_internal(inode,
                v9inode->writeback_fid,
                (__force const char __user *)buffer,
                len, &offset, 0);
    if (retval > 0)
        retval = 0;
    set_page_writeback(page);

    p9_client_write(v9inode->writeback_fid, page_offset(page), &from, &err);

    set_fs(old_fs);
    kunmap(page);
    end_page_writeback(page);
    return retval;
    return err;
}

static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)

@@ -261,16 +250,21 @@ static int v9fs_launder_page(struct page *page)
static ssize_t
v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
    /*
     * FIXME
     * Now that we do caching with cache mode enabled, We need
     * to support direct IO
     */
    p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%pD) off/no(%lld/%lu) EINVAL\n",
            iocb->ki_filp,
            (long long)pos, iter->nr_segs);

    return -EINVAL;
    struct file *file = iocb->ki_filp;
    ssize_t n;
    int err = 0;
    if (rw & WRITE) {
        n = p9_client_write(file->private_data, pos, iter, &err);
        if (n) {
            struct inode *inode = file_inode(file);
            loff_t i_size = i_size_read(inode);
            if (pos + n > i_size)
                inode_add_bytes(inode, pos + n - i_size);
        }
    } else {
        n = p9_client_read(file->private_data, pos, iter, &err);
    }
    return n ? n : err;
}

static int v9fs_write_begin(struct file *filp, struct address_space *mapping,

@@ -33,6 +33,7 @@
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

@@ -115,6 +116,7 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
    int buflen;
    int reclen = 0;
    struct p9_rdir *rdir;
    struct kvec kvec;

    p9_debug(P9_DEBUG_VFS, "name %pD\n", file);
    fid = file->private_data;

@@ -124,16 +126,21 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
    rdir = v9fs_alloc_rdir_buf(file, buflen);
    if (!rdir)
        return -ENOMEM;
    kvec.iov_base = rdir->buf;
    kvec.iov_len = buflen;

    while (1) {
        if (rdir->tail == rdir->head) {
            err = v9fs_file_readn(file, rdir->buf, NULL,
                    buflen, ctx->pos);
            if (err <= 0)
            struct iov_iter to;
            int n;
            iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buflen);
            n = p9_client_read(file->private_data, ctx->pos, &to,
                    &err);
            if (err)
                return err;

            rdir->head = 0;
            rdir->tail = err;
            rdir->tail = n;
        }
        while (rdir->head < rdir->tail) {
            p9stat_init(&st);

fs/9p/vfs_file.c
@@ -36,6 +36,8 @@
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

@@ -285,6 +287,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
        fl->fl_end = glock.start + glock.length - 1;
        fl->fl_pid = glock.proc_id;
    }
    kfree(glock.client_id);
    return res;
}

@@ -363,63 +366,6 @@ out_err:
    return ret;
}

/**
 * v9fs_fid_readn - read from a fid
 * @fid: fid to read
 * @data: data buffer to read data into
 * @udata: user data buffer to read data into
 * @count: size of buffer
 * @offset: offset at which to read data
 *
 */
ssize_t
v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count,
        u64 offset)
{
    int n, total, size;

    p9_debug(P9_DEBUG_VFS, "fid %d offset %llu count %d\n",
            fid->fid, (long long unsigned)offset, count);
    n = 0;
    total = 0;
    size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
    do {
        n = p9_client_read(fid, data, udata, offset, count);
        if (n <= 0)
            break;

        if (data)
            data += n;
        if (udata)
            udata += n;

        offset += n;
        count -= n;
        total += n;
    } while (count > 0 && n == size);

    if (n < 0)
        total = n;

    return total;
}

/**
 * v9fs_file_readn - read from a file
 * @filp: file pointer to read
 * @data: data buffer to read data into
 * @udata: user data buffer to read data into
 * @count: size of buffer
 * @offset: offset at which to read data
 *
 */
ssize_t
v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
        u64 offset)
{
    return v9fs_fid_readn(filp->private_data, data, udata, count, offset);
}

/**
 * v9fs_file_read - read from a file
 * @filp: file pointer to read

@@ -430,69 +376,22 @@ v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
 */

static ssize_t
v9fs_file_read(struct file *filp, char __user *udata, size_t count,
        loff_t * offset)
v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
    int ret;
    struct p9_fid *fid;
    size_t size;
    struct p9_fid *fid = iocb->ki_filp->private_data;
    int ret, err;

    p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
    fid = filp->private_data;
    p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
            iov_iter_count(to), iocb->ki_pos);

    size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
    if (count > size)
        ret = v9fs_file_readn(filp, NULL, udata, count, *offset);
    else
        ret = p9_client_read(fid, NULL, udata, *offset, count);

    if (ret > 0)
        *offset += ret;
    ret = p9_client_read(fid, iocb->ki_pos, to, &err);
    if (!ret)
        return err;

    iocb->ki_pos += ret;
    return ret;
}

ssize_t
v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
        const char __user *data, size_t count,
        loff_t *offset, int invalidate)
{
    int n;
    loff_t i_size;
    size_t total = 0;
    loff_t origin = *offset;
    unsigned long pg_start, pg_end;

    p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
            data, (int)count, (int)*offset);

    do {
        n = p9_client_write(fid, NULL, data+total, origin+total, count);
        if (n <= 0)
            break;
        count -= n;
        total += n;
    } while (count > 0);

    if (invalidate && (total > 0)) {
        pg_start = origin >> PAGE_CACHE_SHIFT;
        pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
        if (inode->i_mapping && inode->i_mapping->nrpages)
            invalidate_inode_pages2_range(inode->i_mapping,
                    pg_start, pg_end);
        *offset += total;
        i_size = i_size_read(inode);
        if (*offset > i_size) {
            inode_add_bytes(inode, *offset - i_size);
            i_size_write(inode, *offset);
        }
    }
    if (n < 0)
        return n;

    return total;
}

/**
 * v9fs_file_write - write to a file
 * @filp: file pointer to write

@@ -502,35 +401,45 @@ v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
 *
 */
static ssize_t
v9fs_file_write(struct file *filp, const char __user * data,
        size_t count, loff_t *offset)
v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
    struct file *file = iocb->ki_filp;
    ssize_t retval = 0;
    loff_t origin = *offset;
    loff_t origin = iocb->ki_pos;
    size_t count = iov_iter_count(from);
    int err = 0;


    retval = generic_write_checks(filp, &origin, &count, 0);
    retval = generic_write_checks(file, &origin, &count, 0);
    if (retval)
        goto out;
        return retval;

    iov_iter_truncate(from, count);

    retval = -EINVAL;
    if ((ssize_t) count < 0)
        goto out;
    retval = 0;
    if (!count)
        goto out;
        return 0;

    retval = v9fs_file_write_internal(file_inode(filp),
                filp->private_data,
                data, count, &origin, 1);
    /* update offset on successful write */
    if (retval > 0)
        *offset = origin;
out:
    return retval;
    retval = p9_client_write(file->private_data, origin, from, &err);
    if (retval > 0) {
        struct inode *inode = file_inode(file);
        loff_t i_size;
        unsigned long pg_start, pg_end;
        pg_start = origin >> PAGE_CACHE_SHIFT;
        pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT;
        if (inode->i_mapping && inode->i_mapping->nrpages)
            invalidate_inode_pages2_range(inode->i_mapping,
                    pg_start, pg_end);
        origin += retval;
        i_size = i_size_read(inode);
        iocb->ki_pos = origin;
        if (origin > i_size) {
            inode_add_bytes(inode, origin - i_size);
            i_size_write(inode, origin);
        }
        return retval;
    }
    return err;
}


static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
        int datasync)
{

@@ -657,44 +566,6 @@ out_unlock:
    return VM_FAULT_NOPAGE;
}

static ssize_t
v9fs_direct_read(struct file *filp, char __user *udata, size_t count,
        loff_t *offsetp)
{
    loff_t size, offset;
    struct inode *inode;
    struct address_space *mapping;

    offset = *offsetp;
    mapping = filp->f_mapping;
    inode = mapping->host;
    if (!count)
        return 0;
    size = i_size_read(inode);
    if (offset < size)
        filemap_write_and_wait_range(mapping, offset,
                offset + count - 1);

    return v9fs_file_read(filp, udata, count, offsetp);
}

/**
 * v9fs_cached_file_read - read from a file
 * @filp: file pointer to read
 * @data: user data buffer to read data into
 * @count: size of buffer
 * @offset: offset at which to read data
 *
 */
static ssize_t
v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
        loff_t *offset)
{
    if (filp->f_flags & O_DIRECT)
        return v9fs_direct_read(filp, data, count, offset);
    return new_sync_read(filp, data, count, offset);
}

/**
 * v9fs_mmap_file_read - read from a file
 * @filp: file pointer to read

@@ -704,84 +575,12 @@ v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
 *
 */
static ssize_t
v9fs_mmap_file_read(struct file *filp, char __user *data, size_t count,
        loff_t *offset)
v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
    /* TODO: Check if there are dirty pages */
    return v9fs_file_read(filp, data, count, offset);
    return v9fs_file_read_iter(iocb, to);
}

static ssize_t
v9fs_direct_write(struct file *filp, const char __user * data,
        size_t count, loff_t *offsetp)
{
    loff_t offset;
    ssize_t retval;
    struct inode *inode;
    struct address_space *mapping;

    offset = *offsetp;
    mapping = filp->f_mapping;
    inode = mapping->host;
    if (!count)
        return 0;

    mutex_lock(&inode->i_mutex);
    retval = filemap_write_and_wait_range(mapping, offset,
            offset + count - 1);
    if (retval)
        goto err_out;
    /*
     * After a write we want buffered reads to be sure to go to disk to get
     * the new data. We invalidate clean cached page from the region we're
     * about to write. We do this *before* the write so that if we fail
     * here we fall back to buffered write
     */
    if (mapping->nrpages) {
        pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT;
        pgoff_t pg_end = (offset + count - 1) >> PAGE_CACHE_SHIFT;

        retval = invalidate_inode_pages2_range(mapping,
                pg_start, pg_end);
        /*
         * If a page can not be invalidated, fall back
         * to buffered write.
         */
        if (retval) {
            if (retval == -EBUSY)
                goto buff_write;
            goto err_out;
        }
    }
    retval = v9fs_file_write(filp, data, count, offsetp);
err_out:
    mutex_unlock(&inode->i_mutex);
    return retval;

buff_write:
    mutex_unlock(&inode->i_mutex);
    return new_sync_write(filp, data, count, offsetp);
}

/**
 * v9fs_cached_file_write - write to a file
 * @filp: file pointer to write
 * @data: data buffer to write data from
 * @count: size of buffer
 * @offset: offset at which to write data
 *
 */
static ssize_t
v9fs_cached_file_write(struct file *filp, const char __user * data,
        size_t count, loff_t *offset)
{

    if (filp->f_flags & O_DIRECT)
        return v9fs_direct_write(filp, data, count, offset);
    return new_sync_write(filp, data, count, offset);
}


/**
 * v9fs_mmap_file_write - write to a file
 * @filp: file pointer to write

@@ -791,14 +590,13 @@ v9fs_cached_file_write(struct file *filp, const char __user * data,
 *
 */
static ssize_t
v9fs_mmap_file_write(struct file *filp, const char __user *data,
        size_t count, loff_t *offset)
v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
    /*
     * TODO: invalidate mmaps on filp's inode between
     * offset and offset+count
     */
    return v9fs_file_write(filp, data, count, offset);
    return v9fs_file_write_iter(iocb, from);
}

static void v9fs_mmap_vm_close(struct vm_area_struct *vma)

@@ -843,8 +641,8 @@ static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {

const struct file_operations v9fs_cached_file_operations = {
    .llseek = generic_file_llseek,
    .read = v9fs_cached_file_read,
    .write = v9fs_cached_file_write,
    .read = new_sync_read,
    .write = new_sync_write,
    .read_iter = generic_file_read_iter,
    .write_iter = generic_file_write_iter,
    .open = v9fs_file_open,

@@ -856,8 +654,8 @@ const struct file_operations v9fs_cached_file_operations = {

const struct file_operations v9fs_cached_file_operations_dotl = {
    .llseek = generic_file_llseek,
    .read = v9fs_cached_file_read,
    .write = v9fs_cached_file_write,
    .read = new_sync_read,
    .write = new_sync_write,
    .read_iter = generic_file_read_iter,
    .write_iter = generic_file_write_iter,
    .open = v9fs_file_open,

@@ -870,8 +668,10 @@ const struct file_operations v9fs_cached_file_operations_dotl = {

const struct file_operations v9fs_file_operations = {
    .llseek = generic_file_llseek,
    .read = v9fs_file_read,
    .write = v9fs_file_write,
    .read = new_sync_read,
    .write = new_sync_write,
    .read_iter = v9fs_file_read_iter,
    .write_iter = v9fs_file_write_iter,
    .open = v9fs_file_open,
    .release = v9fs_dir_release,
    .lock = v9fs_file_lock,

@@ -881,8 +681,10 @@ const struct file_operations v9fs_file_operations = {

const struct file_operations v9fs_file_operations_dotl = {
    .llseek = generic_file_llseek,
    .read = v9fs_file_read,
    .write = v9fs_file_write,
    .read = new_sync_read,
    .write = new_sync_write,
    .read_iter = v9fs_file_read_iter,
    .write_iter = v9fs_file_write_iter,
    .open = v9fs_file_open,
    .release = v9fs_dir_release,
    .lock = v9fs_file_lock_dotl,

@@ -893,8 +695,10 @@ const struct file_operations v9fs_file_operations_dotl = {

const struct file_operations v9fs_mmap_file_operations = {
    .llseek = generic_file_llseek,
    .read = v9fs_mmap_file_read,
    .write = v9fs_mmap_file_write,
    .read = new_sync_read,
    .write = new_sync_write,
    .read_iter = v9fs_mmap_file_read_iter,
    .write_iter = v9fs_mmap_file_write_iter,
    .open = v9fs_file_open,
    .release = v9fs_dir_release,
    .lock = v9fs_file_lock,

@@ -904,8 +708,10 @@ const struct file_operations v9fs_mmap_file_operations = {

const struct file_operations v9fs_mmap_file_operations_dotl = {
    .llseek = generic_file_llseek,
    .read = v9fs_mmap_file_read,
    .write = v9fs_mmap_file_write,
    .read = new_sync_read,
    .write = new_sync_write,
    .read_iter = v9fs_mmap_file_read_iter,
    .write_iter = v9fs_mmap_file_write_iter,
    .open = v9fs_file_open,
    .release = v9fs_dir_release,
    .lock = v9fs_file_lock_dotl,

@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

@@ -25,50 +26,34 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
        void *buffer, size_t buffer_size)
{
    ssize_t retval;
    int msize, read_count;
    u64 offset = 0, attr_size;
    u64 attr_size;
    struct p9_fid *attr_fid;
    struct kvec kvec = {.iov_base = buffer, .iov_len = buffer_size};
    struct iov_iter to;
    int err;

    iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buffer_size);

    attr_fid = p9_client_xattrwalk(fid, name, &attr_size);
    if (IS_ERR(attr_fid)) {
        retval = PTR_ERR(attr_fid);
        p9_debug(P9_DEBUG_VFS, "p9_client_attrwalk failed %zd\n",
                retval);
        attr_fid = NULL;
        goto error;
    }
    if (!buffer_size) {
        /* request to get the attr_size */
        retval = attr_size;
        goto error;
        return retval;
    }
    if (attr_size > buffer_size) {
        retval = -ERANGE;
        goto error;
    }
    msize = attr_fid->clnt->msize;
    while (attr_size) {
        if (attr_size > (msize - P9_IOHDRSZ))
            read_count = msize - P9_IOHDRSZ;
    if (!buffer_size) /* request to get the attr_size */
        retval = attr_size;
        else
            read_count = attr_size;
        read_count = p9_client_read(attr_fid, ((char *)buffer)+offset,
                NULL, offset, read_count);
        if (read_count < 0) {
            /* error in xattr read */
            retval = read_count;
            goto error;
        }
        offset += read_count;
        attr_size -= read_count;
        retval = -ERANGE;
    } else {
        iov_iter_truncate(&to, attr_size);
        retval = p9_client_read(attr_fid, 0, &to, &err);
        if (err)
            retval = err;
    }
    /* Total read xattr bytes */
    retval = offset;
error:
    if (attr_fid)
        p9_client_clunk(attr_fid);
    p9_client_clunk(attr_fid);
    return retval;

}

@@ -120,8 +105,11 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
        const void *value, size_t value_len, int flags)
{
    u64 offset = 0;
    int retval, msize, write_count;
    struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
    struct iov_iter from;
    int retval;

    iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);

    p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
            name, value_len, flags);

@@ -135,29 +123,11 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
     * On success fid points to xattr
     */
    retval = p9_client_xattrcreate(fid, name, value_len, flags);
    if (retval < 0) {
    if (retval < 0)
        p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n",
                retval);
        goto err;
    }
    msize = fid->clnt->msize;
    while (value_len) {
        if (value_len > (msize - P9_IOHDRSZ))
            write_count = msize - P9_IOHDRSZ;
        else
            write_count = value_len;
        write_count = p9_client_write(fid, ((char *)value)+offset,
                NULL, offset, write_count);
        if (write_count < 0) {
            /* error in xattr write */
            retval = write_count;
            goto err;
        }
        offset += write_count;
        value_len -= write_count;
    }
    retval = 0;
err:
    else
        p9_client_write(fid, 0, &from, &retval);
    p9_client_clunk(fid);
    return retval;
}

@@ -211,6 +211,8 @@ struct p9_dirent {
    char d_name[256];
};

struct iov_iter;

int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb);
int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid,
        const char *name);

@@ -236,10 +238,8 @@ int p9_client_clunk(struct p9_fid *fid);
int p9_client_fsync(struct p9_fid *fid, int datasync);
int p9_client_remove(struct p9_fid *fid);
int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags);
int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
        u64 offset, u32 count);
int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
        u64 offset, u32 count);
int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err);
int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
int p9dirent_read(struct p9_client *clnt, char *buf, int len,
        struct p9_dirent *dirent);

@@ -61,7 +61,7 @@ struct p9_trans_module {
    int (*cancel) (struct p9_client *, struct p9_req_t *req);
    int (*cancelled)(struct p9_client *, struct p9_req_t *req);
    int (*zc_request)(struct p9_client *, struct p9_req_t *,
            char *, char *, int , int, int, int);
            struct iov_iter *, struct iov_iter *, int , int, int);
};

void v9fs_register_trans(struct p9_trans_module *m);

net/9p/client.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <net/9p/client.h>

@@ -555,7 +556,7 @@ out_err:
 */

static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
        char *uidata, int in_hdrlen, int kern_buf)
        struct iov_iter *uidata, int in_hdrlen)
{
    int err;
    int ecode;

@@ -591,16 +592,11 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
        ename = &req->rc->sdata[req->rc->offset];
        if (len > inline_len) {
            /* We have error in external buffer */
            if (kern_buf) {
                memcpy(ename + inline_len, uidata,
                        len - inline_len);
            } else {
                err = copy_from_user(ename + inline_len,
                        uidata, len - inline_len);
                if (err) {
                    err = -EFAULT;
                    goto out_err;
                }
            err = copy_from_iter(ename + inline_len,
                    len - inline_len, uidata);
            if (err != len - inline_len) {
                err = -EFAULT;
                goto out_err;
            }
        }
        ename = NULL;

@@ -806,8 +802,8 @@ reterr:
 * p9_client_zc_rpc - issue a request and wait for a response
 * @c: client session
 * @type: type of request
 * @uidata: user bffer that should be ued for zero copy read
 * @uodata: user buffer that shoud be user for zero copy write
 * @uidata: destination for zero copy read
 * @uodata: source for zero copy write
 * @inlen: read buffer size
 * @olen: write buffer size
 * @hdrlen: reader header size, This is the size of response protocol data

@@ -816,9 +812,10 @@ reterr:
 * Returns request structure (which client must free using p9_free_req)
 */
static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
        char *uidata, char *uodata,
        struct iov_iter *uidata,
        struct iov_iter *uodata,
        int inlen, int olen, int in_hdrlen,
        int kern_buf, const char *fmt, ...)
        const char *fmt, ...)
{
    va_list ap;
    int sigpending, err;

@@ -841,12 +838,8 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
    } else
        sigpending = 0;

    /* If we are called with KERNEL_DS force kern_buf */
    if (segment_eq(get_fs(), KERNEL_DS))
        kern_buf = 1;

    err = c->trans_mod->zc_request(c, req, uidata, uodata,
            inlen, olen, in_hdrlen, kern_buf);
            inlen, olen, in_hdrlen);
    if (err < 0) {
        if (err == -EIO)
            c->status = Disconnected;

@@ -876,7 +869,7 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
    if (err < 0)
        goto reterr;

    err = p9_check_zc_errors(c, req, uidata, in_hdrlen, kern_buf);
    err = p9_check_zc_errors(c, req, uidata, in_hdrlen);
    trace_9p_client_res(c, type, req->rc->tag, err);
    if (!err)
        return req;

@@ -1123,6 +1116,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
        fid = NULL;
        goto error;
    }
    fid->uid = n_uname;

    req = p9_client_rpc(clnt, P9_TATTACH, "ddss?u", fid->fid,
            afid ? afid->fid : P9_NOFID, uname, aname, n_uname);

@@ -1541,142 +1535,128 @@ error:
EXPORT_SYMBOL(p9_client_unlinkat);

int
p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
        u32 count)
p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
{
    char *dataptr;
    int kernel_buf = 0;
    struct p9_client *clnt = fid->clnt;
    struct p9_req_t *req;
    struct p9_client *clnt;
    int err, rsize, non_zc = 0;

    int total = 0;

    p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
            fid->fid, (unsigned long long) offset, count);
    err = 0;
    clnt = fid->clnt;
            fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));

    rsize = fid->iounit;
    if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
        rsize = clnt->msize - P9_IOHDRSZ;
    while (iov_iter_count(to)) {
        int count = iov_iter_count(to);
        int rsize, non_zc = 0;
        char *dataptr;

        rsize = fid->iounit;
        if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
            rsize = clnt->msize - P9_IOHDRSZ;

    if (count < rsize)
        rsize = count;
        if (count < rsize)
            rsize = count;

    /* Don't bother zerocopy for small IO (< 1024) */
    if (clnt->trans_mod->zc_request && rsize > 1024) {
        char *indata;
        if (data) {
            kernel_buf = 1;
            indata = data;
        } else
            indata = (__force char *)udata;
        /*
         * response header len is 11
         * PDU Header(7) + IO Size (4)
         */
        req = p9_client_zc_rpc(clnt, P9_TREAD, indata, NULL, rsize, 0,
                11, kernel_buf, "dqd", fid->fid,
                offset, rsize);
    } else {
        non_zc = 1;
        req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
                rsize);
    }
    if (IS_ERR(req)) {
        err = PTR_ERR(req);
        goto error;
    }

    err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
    if (err) {
        trace_9p_protocol_dump(clnt, req->rc);
        goto free_and_error;
    }

    p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);

    if (non_zc) {
        if (data) {
            memmove(data, dataptr, count);
        /* Don't bother zerocopy for small IO (< 1024) */
        if (clnt->trans_mod->zc_request && rsize > 1024) {
            /*
             * response header len is 11
             * PDU Header(7) + IO Size (4)
             */
            req = p9_client_zc_rpc(clnt, P9_TREAD, to, NULL, rsize,
                    0, 11, "dqd", fid->fid,
                    offset, rsize);
        } else {
            err = copy_to_user(udata, dataptr, count);
            if (err) {
                err = -EFAULT;
                goto free_and_error;
            }
            non_zc = 1;
            req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
                    rsize);
        }
        if (IS_ERR(req)) {
            *err = PTR_ERR(req);
            break;
        }
    }
    p9_free_req(clnt, req);
    return count;

free_and_error:
    p9_free_req(clnt, req);
error:
    return err;
        *err = p9pdu_readf(req->rc, clnt->proto_version,
                "D", &count, &dataptr);
        if (*err) {
            trace_9p_protocol_dump(clnt, req->rc);
            p9_free_req(clnt, req);
            break;
        }

        p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
        if (!count) {
            p9_free_req(clnt, req);
            break;
        }

        if (non_zc) {
            int n = copy_to_iter(dataptr, count, to);
            total += n;
            offset += n;
            if (n != count) {
                *err = -EFAULT;
                p9_free_req(clnt, req);
                break;
            }
        } else {
            iov_iter_advance(to, count);
            total += count;
            offset += count;
        }
        p9_free_req(clnt, req);
    }
    return total;
}
EXPORT_SYMBOL(p9_client_read);

int
p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
        u64 offset, u32 count)
p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
{
    int err, rsize;
    int kernel_buf = 0;
    struct p9_client *clnt;
    struct p9_client *clnt = fid->clnt;
    struct p9_req_t *req;
    int total = 0;

    p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n",
            fid->fid, (unsigned long long) offset, count);
    err = 0;
    clnt = fid->clnt;
    p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
            fid->fid, (unsigned long long) offset,
            iov_iter_count(from));

    rsize = fid->iounit;
    if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
        rsize = clnt->msize - P9_IOHDRSZ;
    while (iov_iter_count(from)) {
        int count = iov_iter_count(from);
        int rsize = fid->iounit;
        if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
            rsize = clnt->msize - P9_IOHDRSZ;

    if (count < rsize)
        rsize = count;
        if (count < rsize)
            rsize = count;

    /* Don't bother zerocopy for small IO (< 1024) */
    if (clnt->trans_mod->zc_request && rsize > 1024) {
        char *odata;
        if (data) {
            kernel_buf = 1;
            odata = data;
        } else
            odata = (char *)udata;
        req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
                P9_ZC_HDR_SZ, kernel_buf, "dqd",
                fid->fid, offset, rsize);
    } else {
        if (data)
            req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid,
                    offset, rsize, data);
        else
            req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid,
                    offset, rsize, udata);
        /* Don't bother zerocopy for small IO (< 1024) */
        if (clnt->trans_mod->zc_request && rsize > 1024) {
            req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, from, 0,
                    rsize, P9_ZC_HDR_SZ, "dqd",
                    fid->fid, offset, rsize);
        } else {
            req = p9_client_rpc(clnt, P9_TWRITE, "dqV", fid->fid,
                    offset, rsize, from);
        }
        if (IS_ERR(req)) {
            *err = PTR_ERR(req);
            break;
        }

        *err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
        if (*err) {
            trace_9p_protocol_dump(clnt, req->rc);
            p9_free_req(clnt, req);
        }

        p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);

        p9_free_req(clnt, req);
        iov_iter_advance(from, count);
        total += count;
        offset += count;
    }
    if (IS_ERR(req)) {
        err = PTR_ERR(req);
        goto error;
    }

    err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
    if (err) {
        trace_9p_protocol_dump(clnt, req->rc);
        goto free_and_error;
    }

    p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);

    p9_free_req(clnt, req);
    return count;

free_and_error:
    p9_free_req(clnt, req);
error:
    return err;
    return total;
}
EXPORT_SYMBOL(p9_client_write);

@@ -2068,6 +2048,10 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
    struct p9_client *clnt;
    struct p9_req_t *req;
    char *dataptr;
    struct kvec kv = {.iov_base = data, .iov_len = count};
    struct iov_iter to;

    iov_iter_kvec(&to, READ | ITER_KVEC, &kv, 1, count);

    p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
            fid->fid, (unsigned long long) offset, count);

@@ -2088,8 +2072,8 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
         * response header len is 11
         * PDU Header(7) + IO Size (4)
         */
        req = p9_client_zc_rpc(clnt, P9_TREADDIR, data, NULL, rsize, 0,
                11, 1, "dqd", fid->fid, offset, rsize);
        req = p9_client_zc_rpc(clnt, P9_TREADDIR, &to, NULL, rsize, 0,
                11, "dqd", fid->fid, offset, rsize);
    } else {
        non_zc = 1;
        req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid,

@@ -33,6 +33,7 @@
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include "protocol.h"

@@ -69,10 +70,11 @@ static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size)
}

static size_t
pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
pdu_write_u(struct p9_fcall *pdu, struct iov_iter *from, size_t size)
{
    size_t len = min(pdu->capacity - pdu->size, size);
    if (copy_from_user(&pdu->sdata[pdu->size], udata, len))
    struct iov_iter i = *from;
    if (copy_from_iter(&pdu->sdata[pdu->size], len, &i) != len)
        len = 0;

    pdu->size += len;

@@ -437,23 +439,13 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
                stbuf->extension, stbuf->n_uid,
                stbuf->n_gid, stbuf->n_muid);
        } break;
    case 'D':{
            uint32_t count = va_arg(ap, uint32_t);
            const void *data = va_arg(ap, const void *);

            errcode = p9pdu_writef(pdu, proto_version, "d",
                    count);
            if (!errcode && pdu_write(pdu, data, count))
                errcode = -EFAULT;
        }
        break;
    case 'U':{
    case 'V':{
            int32_t count = va_arg(ap, int32_t);
            const char __user *udata =
                    va_arg(ap, const void __user *);
            struct iov_iter *from =
                    va_arg(ap, struct iov_iter *);
            errcode = p9pdu_writef(pdu, proto_version, "d",
                    count);
            if (!errcode && pdu_write_u(pdu, udata, count))
            if (!errcode && pdu_write_u(pdu, from, count))
                errcode = -EFAULT;
        }
        break;

|
@ -12,12 +12,8 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/module.h>
|
||||
#include <net/9p/9p.h>
|
||||
#include <net/9p/client.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include "trans_common.h"
|
||||
|
||||
/**
|
||||
* p9_release_req_pages - Release pages after the transaction.
|
||||
|
@ -31,39 +27,3 @@ void p9_release_pages(struct page **pages, int nr_pages)
|
|||
put_page(pages[i]);
|
||||
}
|
||||
EXPORT_SYMBOL(p9_release_pages);
|
||||
|
||||
/**
|
||||
* p9_nr_pages - Return number of pages needed to accommodate the payload.
|
||||
*/
|
||||
int p9_nr_pages(char *data, int len)
|
||||
{
|
||||
unsigned long start_page, end_page;
|
||||
start_page = (unsigned long)data >> PAGE_SHIFT;
|
||||
end_page = ((unsigned long)data + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
return end_page - start_page;
|
||||
}
|
||||
EXPORT_SYMBOL(p9_nr_pages);
|
||||
|
||||
/**
|
||||
* payload_gup - Translates user buffer into kernel pages and
|
||||
* pins them either for read/write through get_user_pages_fast().
|
||||
* @req: Request to be sent to server.
|
||||
* @pdata_off: data offset into the first page after translation (gup).
|
||||
* @pdata_len: Total length of the IO. gup may not return requested # of pages.
|
||||
* @nr_pages: number of pages to accommodate the payload
|
||||
* @rw: Indicates if the pages are for read or write.
|
||||
*/
|
||||
|
||||
int p9_payload_gup(char *data, int *nr_pages, struct page **pages, int write)
|
||||
{
|
||||
int nr_mapped_pages;
|
||||
|
||||
nr_mapped_pages = get_user_pages_fast((unsigned long)data,
|
||||
*nr_pages, write, pages);
|
||||
if (nr_mapped_pages <= 0)
|
||||
return nr_mapped_pages;
|
||||
|
||||
*nr_pages = nr_mapped_pages;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(p9_payload_gup);
|
||||
|
|
|
@ -13,5 +13,3 @@
|
|||
*/
|
||||
|
||||
void p9_release_pages(struct page **, int);
|
||||
int p9_payload_gup(char *, int *, struct page **, int);
|
||||
int p9_nr_pages(char *, int);
|
||||
|
|
|
@ -217,15 +217,15 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
|
|||
* @start: which segment of the sg_list to start at
|
||||
* @pdata: a list of pages to add into sg.
|
||||
* @nr_pages: number of pages to pack into the scatter/gather list
|
||||
* @data: data to pack into scatter/gather list
|
||||
* @offs: amount of data in the beginning of first page _not_ to pack
|
||||
* @count: amount of data to pack into the scatter/gather list
|
||||
*/
|
||||
static int
|
||||
pack_sg_list_p(struct scatterlist *sg, int start, int limit,
|
||||
struct page **pdata, int nr_pages, char *data, int count)
|
||||
struct page **pdata, int nr_pages, size_t offs, int count)
|
||||
{
|
||||
int i = 0, s;
|
||||
int data_off;
|
||||
int data_off = offs;
|
||||
int index = start;
|
||||
|
||||
BUG_ON(nr_pages > (limit - start));
|
||||
|
@ -233,16 +233,14 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
|
|||
* if the first page doesn't start at
|
||||
* page boundary find the offset
|
||||
*/
|
||||
data_off = offset_in_page(data);
|
||||
while (nr_pages) {
|
||||
s = rest_of_page(data);
|
||||
s = PAGE_SIZE - data_off;
|
||||
if (s > count)
|
||||
s = count;
|
||||
/* Make sure we don't terminate early. */
|
||||
sg_unmark_end(&sg[index]);
|
||||
sg_set_page(&sg[index++], pdata[i++], s, data_off);
|
||||
data_off = 0;
|
||||
data += s;
|
||||
count -= s;
|
||||
nr_pages--;
|
||||
}
|
||||
|
@@ -314,11 +312,20 @@ req_retry:
}

static int p9_get_mapped_pages(struct virtio_chan *chan,
        struct page **pages, char *data,
        int nr_pages, int write, int kern_buf)
        struct page ***pages,
        struct iov_iter *data,
        int count,
        size_t *offs,
        int *need_drop)
{
    int nr_pages;
    int err;
    if (!kern_buf) {

    if (!iov_iter_count(data))
        return 0;

    if (!(data->type & ITER_KVEC)) {
        int n;
        /*
         * We allow only p9_max_pages pinned. We wait for the
         * Other zc request to finish here

@@ -329,26 +336,49 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
            if (err == -ERESTARTSYS)
                return err;
        }
        err = p9_payload_gup(data, &nr_pages, pages, write);
        if (err < 0)
            return err;
        n = iov_iter_get_pages_alloc(data, pages, count, offs);
        if (n < 0)
            return n;
        *need_drop = 1;
        nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE);
        atomic_add(nr_pages, &vp_pinned);
        return n;
    } else {
        /* kernel buffer, no need to pin pages */
        int s, index = 0;
        int count = nr_pages;
        while (nr_pages) {
            s = rest_of_page(data);
            if (is_vmalloc_addr(data))
                pages[index++] = vmalloc_to_page(data);
            else
                pages[index++] = kmap_to_page(data);
            data += s;
            nr_pages--;
        int index;
        size_t len;
        void *p;

        /* we'd already checked that it's non-empty */
        while (1) {
            len = iov_iter_single_seg_count(data);
            if (likely(len)) {
                p = data->kvec->iov_base + data->iov_offset;
                break;
            }
            iov_iter_advance(data, 0);
        }
        nr_pages = count;
        if (len > count)
            len = count;

        nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
                (unsigned long)p / PAGE_SIZE;

        *pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
        if (!*pages)
            return -ENOMEM;

        *need_drop = 0;
        p -= (*offs = (unsigned long)p % PAGE_SIZE);
        for (index = 0; index < nr_pages; index++) {
            if (is_vmalloc_addr(p))
                (*pages)[index] = vmalloc_to_page(p);
            else
                (*pages)[index] = kmap_to_page(p);
            p += PAGE_SIZE;
        }
        return len;
    }
    return nr_pages;
}

/**

@@ -364,8 +394,8 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
 */
static int
p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
        char *uidata, char *uodata, int inlen,
        int outlen, int in_hdr_len, int kern_buf)
        struct iov_iter *uidata, struct iov_iter *uodata,
        int inlen, int outlen, int in_hdr_len)
{
    int in, out, err, out_sgs, in_sgs;
    unsigned long flags;

@@ -373,41 +403,32 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
    struct page **in_pages = NULL, **out_pages = NULL;
    struct virtio_chan *chan = client->trans;
    struct scatterlist *sgs[4];
    size_t offs;
    int need_drop = 0;

    p9_debug(P9_DEBUG_TRANS, "virtio request\n");

    if (uodata) {
        out_nr_pages = p9_nr_pages(uodata, outlen);
        out_pages = kmalloc(sizeof(struct page *) * out_nr_pages,
                GFP_NOFS);
        if (!out_pages) {
            err = -ENOMEM;
            goto err_out;
        int n = p9_get_mapped_pages(chan, &out_pages, uodata,
                outlen, &offs, &need_drop);
        if (n < 0)
            return n;
        out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
        if (n != outlen) {
            __le32 v = cpu_to_le32(n);
            memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
            outlen = n;
        }
        out_nr_pages = p9_get_mapped_pages(chan, out_pages, uodata,
                out_nr_pages, 0, kern_buf);
        if (out_nr_pages < 0) {
            err = out_nr_pages;
            kfree(out_pages);
            out_pages = NULL;
            goto err_out;
        }
    }
    if (uidata) {
        in_nr_pages = p9_nr_pages(uidata, inlen);
        in_pages = kmalloc(sizeof(struct page *) * in_nr_pages,
                GFP_NOFS);
        if (!in_pages) {
            err = -ENOMEM;
            goto err_out;
        }
        in_nr_pages = p9_get_mapped_pages(chan, in_pages, uidata,
                in_nr_pages, 1, kern_buf);
        if (in_nr_pages < 0) {
            err = in_nr_pages;
            kfree(in_pages);
            in_pages = NULL;
            goto err_out;
    } else if (uidata) {
        int n = p9_get_mapped_pages(chan, &in_pages, uidata,
                inlen, &offs, &need_drop);
        if (n < 0)
            return n;
        in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
        if (n != inlen) {
            __le32 v = cpu_to_le32(n);
            memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
            inlen = n;
        }
    }
    req->status = REQ_STATUS_SENT;

@@ -426,7 +447,7 @@ req_retry_pinned:
    if (out_pages) {
        sgs[out_sgs++] = chan->sg + out;
        out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
                out_pages, out_nr_pages, uodata, outlen);
                out_pages, out_nr_pages, offs, outlen);
    }

    /*

@@ -444,7 +465,7 @@ req_retry_pinned:
    if (in_pages) {
        sgs[out_sgs + in_sgs++] = chan->sg + out + in;
        in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
                in_pages, in_nr_pages, uidata, inlen);
                in_pages, in_nr_pages, offs, inlen);
    }

    BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));

@@ -478,7 +499,7 @@ req_retry_pinned:
     * Non kernel buffers are pinned, unpin them
     */
err_out:
    if (!kern_buf) {
    if (need_drop) {
        if (in_pages) {
            p9_release_pages(in_pages, in_nr_pages);
            atomic_sub(in_nr_pages, &vp_pinned);