[iov_iter] new primitives - copy_from_iter_full() and friends
copy_from_iter_full(), copy_from_iter_full_nocache() and
csum_and_copy_from_iter_full() are counterparts of copy_from_iter() et al.
They advance the iterator only if the entire requested amount could be
copied, and return a bool indicating whether the copy succeeded.

Convert some obvious users.

*NOTE* - do not blindly assume that something is a good candidate for
these unless you are sure that not advancing the iov_iter on failure is
the right behaviour for that caller.  Anything that does short-read /
short-write style handling (or sits in a loop, etc.) is unlikely to be a
good candidate.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent e5517c2a5a
commit cbbd26b8b1
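A minimal usage sketch (not part of the patch) of the conversion pattern applied throughout this series: a hypothetical ->write_iter() handler pulling a fixed-size header out of an iov_iter. example_write_iter() and struct example_msg are made-up names; only copy_from_iter()/copy_from_iter_full() come from this commit.

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uio.h>

/* Hypothetical fixed-size message header read by the handler below. */
struct example_msg {
        __u32 type;
        __u32 len;
};

static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct example_msg msg;

        /*
         * Old pattern: compare the returned byte count by hand.  On a short
         * copy the iterator has already been advanced by the partial amount:
         *
         *      if (copy_from_iter(&msg, sizeof(msg), from) != sizeof(msg))
         *              return -EFAULT;
         */

        /* New pattern: the iterator is advanced only when the whole
         * structure could be copied, so failure leaves *from untouched. */
        if (!copy_from_iter_full(&msg, sizeof(msg), from))
                return -EFAULT;

        /* ... process msg and any remaining payload in *from here ... */
        return sizeof(msg);
}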
@@ -181,7 +181,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
 	if (!skb)
 		return -ENOMEM;
 
-	if (copy_from_iter(skb_put(skb, len), len, from) != len) {
+	if (!copy_from_iter_full(skb_put(skb, len), len, from)) {
 		kfree_skb(skb);
 		return -EFAULT;
 	}
@@ -673,7 +673,6 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 	int depth;
 	bool zerocopy = false;
 	size_t linear;
-	ssize_t n;
 
 	if (q->flags & IFF_VNET_HDR) {
 		vnet_hdr_len = q->vnet_hdr_sz;
@@ -684,8 +683,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 		len -= vnet_hdr_len;
 
 		err = -EFAULT;
-		n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
-		if (n != sizeof(vnet_hdr))
+		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
 			goto err;
 		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
 		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -1171,7 +1171,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	bool zerocopy = false;
 	int err;
 	u32 rxhash;
-	ssize_t n;
 
 	if (!(tun->dev->flags & IFF_UP))
 		return -EIO;
@@ -1181,8 +1180,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			return -EINVAL;
 		len -= sizeof(pi);
 
-		n = copy_from_iter(&pi, sizeof(pi), from);
-		if (n != sizeof(pi))
+		if (!copy_from_iter_full(&pi, sizeof(pi), from))
 			return -EFAULT;
 	}
 
@@ -1191,8 +1189,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			return -EINVAL;
 		len -= tun->vnet_hdr_sz;
 
-		n = copy_from_iter(&gso, sizeof(gso), from);
-		if (n != sizeof(gso))
+		if (!copy_from_iter_full(&gso, sizeof(gso), from))
 			return -EFAULT;
 
 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -949,7 +949,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 			goto error_mutex;
 		}
 		if (!io_data->read &&
-		    copy_from_iter(data, data_len, &io_data->data) != data_len) {
+		    !copy_from_iter_full(data, data_len, &io_data->data)) {
 			ret = -EFAULT;
 			goto error_mutex;
 		}
@@ -667,7 +667,7 @@ ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		return -ENOMEM;
 	}
 
-	if (unlikely(copy_from_iter(buf, len, from) != len)) {
+	if (unlikely(!copy_from_iter_full(buf, len, from))) {
 		value = -EFAULT;
 		goto out;
 	}
@@ -922,8 +922,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		 */
 		iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
 
-		ret = copy_from_iter(req, req_size, &out_iter);
-		if (unlikely(ret != req_size)) {
+		if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
 			vq_err(vq, "Faulted on copy_from_iter\n");
 			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
@@ -1862,8 +1862,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
 			       i, count);
 			return -EINVAL;
 		}
-		if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
-			     sizeof(desc))) {
+		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
 			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
 			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
 			return -EINVAL;
@@ -203,7 +203,7 @@ ncp_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 				 bufsize - (pos % bufsize),
 				 iov_iter_count(from));
 
-		if (copy_from_iter(bouncebuffer, to_write, from) != to_write) {
+		if (!copy_from_iter_full(bouncebuffer, to_write, from)) {
 			errno = -EFAULT;
 			break;
 		}
@@ -355,7 +355,6 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
 		__u64 tag;
 	} head;
 	int total = ret = iov_iter_count(iter);
-	int n;
 	int downcall_size = sizeof(struct orangefs_downcall_s);
 	int head_size = sizeof(head);
 
@@ -372,8 +371,7 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
 		return -EFAULT;
 	}
 
-	n = copy_from_iter(&head, head_size, iter);
-	if (n < head_size) {
+	if (!copy_from_iter_full(&head, head_size, iter)) {
 		gossip_err("%s: failed to copy head.\n", __func__);
 		return -EFAULT;
 	}
@@ -407,8 +405,7 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
 		return ret;
 	}
 
-	n = copy_from_iter(&op->downcall, downcall_size, iter);
-	if (n != downcall_size) {
+	if (!copy_from_iter_full(&op->downcall, downcall_size, iter)) {
 		gossip_err("%s: failed to copy downcall.\n", __func__);
 		goto Efault;
 	}
@@ -462,10 +459,8 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
 			goto Enomem;
 		}
 		memset(op->downcall.trailer_buf, 0, op->downcall.trailer_size);
-		n = copy_from_iter(op->downcall.trailer_buf,
-				   op->downcall.trailer_size,
-				   iter);
-		if (n != op->downcall.trailer_size) {
+		if (!copy_from_iter_full(op->downcall.trailer_buf,
+					 op->downcall.trailer_size, iter)) {
 			gossip_err("%s: failed to copy trailer.\n", __func__);
 			vfree(op->downcall.trailer_buf);
 			goto Efault;
@@ -89,7 +89,9 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			   struct iov_iter *i);
 size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
+bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
+bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
 unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
@@ -155,6 +157,7 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 }
 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 
 int import_iovec(int type, const struct iovec __user * uvector,
 		 unsigned nr_segs, unsigned fast_segs,
@@ -748,7 +748,7 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
 		return -ENOMEM;
 
 	buf[len] = '\0';
-	if (copy_from_iter(buf, len, from) != len) {
+	if (!copy_from_iter_full(buf, len, from)) {
 		kfree(buf);
 		return -EFAULT;
 	}
@@ -568,6 +568,31 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(copy_from_iter);
 
+bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *to = addr;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return false;
+	}
+	if (unlikely(i->count < bytes))
+		return false;
+
+	iterate_all_kinds(i, bytes, v, ({
+		if (__copy_from_user((to += v.iov_len) - v.iov_len,
+				     v.iov_base, v.iov_len))
+			return false;
+		0;}),
+		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+	)
+
+	iov_iter_advance(i, bytes);
+	return true;
+}
+EXPORT_SYMBOL(copy_from_iter_full);
+
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
@@ -587,6 +612,30 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(copy_from_iter_nocache);
 
+bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *to = addr;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return false;
+	}
+	if (unlikely(i->count < bytes))
+		return false;
+	iterate_all_kinds(i, bytes, v, ({
+		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+					     v.iov_base, v.iov_len))
+			return false;
+		0;}),
+		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+	)
+
+	iov_iter_advance(i, bytes);
+	return true;
+}
+EXPORT_SYMBOL(copy_from_iter_full_nocache);
+
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
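For illustration only, a hedged sketch of the kind of caller the _nocache variant targets: copying user data into a staging buffer the CPU will not read back soon, so a non-temporal copy is preferable where the architecture supports it. stage_user_data() and struct example_stage are hypothetical names; only copy_from_iter_full_nocache() comes from this patch.

#include <linux/types.h>
#include <linux/uio.h>

/* Hypothetical staging descriptor: 'buf' will be handed to a device and
 * not read back by the CPU, so avoiding cache pollution is worthwhile. */
struct example_stage {
        void    *buf;
        size_t  len;
};

static int stage_user_data(struct example_stage *st, struct iov_iter *from)
{
        /* Same all-or-nothing semantics as copy_from_iter_full(): on a
         * fault the iterator is not advanced and false is returned. */
        if (!copy_from_iter_full_nocache(st->buf, st->len, from))
                return -EFAULT;

        return 0;
}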
@@ -1037,6 +1086,51 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 }
 EXPORT_SYMBOL(csum_and_copy_from_iter);
 
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
+			       struct iov_iter *i)
+{
+	char *to = addr;
+	__wsum sum, next;
+	size_t off = 0;
+	sum = *csum;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return false;
+	}
+	if (unlikely(i->count < bytes))
+		return false;
+	iterate_all_kinds(i, bytes, v, ({
+		int err = 0;
+		next = csum_and_copy_from_user(v.iov_base,
+					       (to += v.iov_len) - v.iov_len,
+					       v.iov_len, 0, &err);
+		if (err)
+			return false;
+		sum = csum_block_add(sum, next, off);
+		off += v.iov_len;
+		0;
+	}), ({
+		char *p = kmap_atomic(v.bv_page);
+		next = csum_partial_copy_nocheck(p + v.bv_offset,
+						 (to += v.bv_len) - v.bv_len,
+						 v.bv_len, 0);
+		kunmap_atomic(p);
+		sum = csum_block_add(sum, next, off);
+		off += v.bv_len;
+	}),({
+		next = csum_partial_copy_nocheck(v.iov_base,
+						 (to += v.iov_len) - v.iov_len,
+						 v.iov_len, 0);
+		sum = csum_block_add(sum, next, off);
+		off += v.iov_len;
+	})
+	)
+	*csum = sum;
+	iov_iter_advance(i, bytes);
+	return true;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter_full);
+
 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
 			     struct iov_iter *i)
 {
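And a hedged sketch of how a datagram send path might use the checksumming variant to copy payload and accumulate its checksum in one pass. example_copy_payload() is a hypothetical helper, not code from this commit; note that on failure neither the iterator nor *csum is updated, which the sketch relies on.

#include <linux/socket.h>
#include <linux/uio.h>
#include <net/checksum.h>

/* Hypothetical helper: copy 'len' bytes of payload from the sender's
 * message iterator into 'data', folding the bytes into '*csum'. */
static int example_copy_payload(void *data, size_t len, __wsum *csum,
				struct msghdr *msg)
{
        /* All-or-nothing: a fault leaves msg->msg_iter and *csum untouched,
         * so the caller can simply free the half-built packet and bail out. */
        if (!csum_and_copy_from_iter_full(data, len, csum, &msg->msg_iter))
                return -EFAULT;

        return 0;
}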
@@ -630,7 +630,7 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
 		goto out;
 	skb->dev = NULL; /* for paths shared with net_device interfaces */
 	ATM_SKB(skb)->atm_options = vcc->atm_options;
-	if (copy_from_iter(skb_put(skb, size), size, &m->msg_iter) != size) {
+	if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
 		kfree_skb(skb);
 		error = -EFAULT;
 		goto out;
@@ -2127,7 +2127,7 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
 	struct sk_buff **frag;
 	int sent = 0;
 
-	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
+	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
 		return -EFAULT;
 
 	sent += count;
@@ -2147,8 +2147,8 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
 
 		*frag = tmp;
 
-		if (copy_from_iter(skb_put(*frag, count), count,
-				   &msg->msg_iter) != count)
+		if (!copy_from_iter_full(skb_put(*frag, count), count,
+				   &msg->msg_iter))
 			return -EFAULT;
 
 		sent += count;
@@ -2432,14 +2432,11 @@ static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
 				 struct virtio_net_hdr *vnet_hdr)
 {
-	int n;
-
 	if (*len < sizeof(*vnet_hdr))
 		return -EINVAL;
 	*len -= sizeof(*vnet_hdr);
 
-	n = copy_from_iter(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter);
-	if (n != sizeof(*vnet_hdr))
+	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
 		return -EFAULT;
 
 	return __packet_snd_vnet_parse(vnet_hdr, *len);
@@ -268,7 +268,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		__skb_queue_tail(list, skb);
 		skb_copy_to_linear_data(skb, mhdr, mhsz);
 		pktpos = skb->data + mhsz;
-		if (copy_from_iter(pktpos, dsz, &m->msg_iter) == dsz)
+		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
 			return dsz;
 		rc = -EFAULT;
 		goto error;
@@ -299,7 +299,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		if (drem < pktrem)
 			pktrem = drem;
 
-		if (copy_from_iter(pktpos, pktrem, &m->msg_iter) != pktrem) {
+		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
 			rc = -EFAULT;
 			goto error;
 		}
@@ -1074,7 +1074,7 @@ long keyctl_instantiate_key_common(key_serial_t id,
 		}
 
 		ret = -EFAULT;
-		if (copy_from_iter(payload, plen, from) != plen)
+		if (!copy_from_iter_full(payload, plen, from))
 			goto error2;
 	}
 