copy_page_{to,from}_iter(): switch iovec variants to generic
we can do copyin/copyout under kmap_local_page(); it shouldn't overflow the kmap stack - the maximal footprint increases only by one here.

Reviewed-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Christian Brauner (Microsoft) <brauner@kernel.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
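For context, the generic path the iovec case now falls through to is the kmap_local_page()-based copy already used for bvec, kvec and xarray iterators. A minimal sketch of that shape follows; the function name is illustrative only, and the real code is in __copy_page_to_iter() in the diff below:

/* Illustrative sketch only - generic_copy_page_to_iter() is a made-up name;
 * the code actually used by this commit is __copy_page_to_iter() below. */
static size_t generic_copy_page_to_iter(struct page *page, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	void *kaddr = kmap_local_page(page);	/* short-lived local mapping */
	/* _copy_to_iter() may fault on userspace iovecs; that is fine under
	 * kmap_local_page(), unlike kmap_atomic(). */
	size_t copied = _copy_to_iter(kaddr + offset, bytes, i);

	kunmap_local(kaddr);
	return copied;
}

Because kmap_local_page() does not disable preemption or pagefaults, copyin()/copyout() to user iovecs can run directly under it, which is what makes the open-coded kmap_atomic()/kmap() fallback dance below removable.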
parent 164f4064ca
commit 59bb69c67c

lib/iov_iter.c (191 changed lines)

@@ -168,174 +168,6 @@ static int copyin(void *to, const void __user *from, size_t n)
 	return n;
 }
 
-static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
-			 struct iov_iter *i)
-{
-	size_t skip, copy, left, wanted;
-	const struct iovec *iov;
-	char __user *buf;
-	void *kaddr, *from;
-
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
-	might_fault();
-	wanted = bytes;
-	iov = i->iov;
-	skip = i->iov_offset;
-	buf = iov->iov_base + skip;
-	copy = min(bytes, iov->iov_len - skip);
-
-	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
-		kaddr = kmap_atomic(page);
-		from = kaddr + offset;
-
-		/* first chunk, usually the only one */
-		left = copyout(buf, from, copy);
-		copy -= left;
-		skip += copy;
-		from += copy;
-		bytes -= copy;
-
-		while (unlikely(!left && bytes)) {
-			iov++;
-			buf = iov->iov_base;
-			copy = min(bytes, iov->iov_len);
-			left = copyout(buf, from, copy);
-			copy -= left;
-			skip = copy;
-			from += copy;
-			bytes -= copy;
-		}
-		if (likely(!bytes)) {
-			kunmap_atomic(kaddr);
-			goto done;
-		}
-		offset = from - kaddr;
-		buf += copy;
-		kunmap_atomic(kaddr);
-		copy = min(bytes, iov->iov_len - skip);
-	}
-	/* Too bad - revert to non-atomic kmap */
-
-	kaddr = kmap(page);
-	from = kaddr + offset;
-	left = copyout(buf, from, copy);
-	copy -= left;
-	skip += copy;
-	from += copy;
-	bytes -= copy;
-	while (unlikely(!left && bytes)) {
-		iov++;
-		buf = iov->iov_base;
-		copy = min(bytes, iov->iov_len);
-		left = copyout(buf, from, copy);
-		copy -= left;
-		skip = copy;
-		from += copy;
-		bytes -= copy;
-	}
-	kunmap(page);
-
-done:
-	if (skip == iov->iov_len) {
-		iov++;
-		skip = 0;
-	}
-	i->count -= wanted - bytes;
-	i->nr_segs -= iov - i->iov;
-	i->iov = iov;
-	i->iov_offset = skip;
-	return wanted - bytes;
-}
-
-static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
-			 struct iov_iter *i)
-{
-	size_t skip, copy, left, wanted;
-	const struct iovec *iov;
-	char __user *buf;
-	void *kaddr, *to;
-
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
-	might_fault();
-	wanted = bytes;
-	iov = i->iov;
-	skip = i->iov_offset;
-	buf = iov->iov_base + skip;
-	copy = min(bytes, iov->iov_len - skip);
-
-	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
-		kaddr = kmap_atomic(page);
-		to = kaddr + offset;
-
-		/* first chunk, usually the only one */
-		left = copyin(to, buf, copy);
-		copy -= left;
-		skip += copy;
-		to += copy;
-		bytes -= copy;
-
-		while (unlikely(!left && bytes)) {
-			iov++;
-			buf = iov->iov_base;
-			copy = min(bytes, iov->iov_len);
-			left = copyin(to, buf, copy);
-			copy -= left;
-			skip = copy;
-			to += copy;
-			bytes -= copy;
-		}
-		if (likely(!bytes)) {
-			kunmap_atomic(kaddr);
-			goto done;
-		}
-		offset = to - kaddr;
-		buf += copy;
-		kunmap_atomic(kaddr);
-		copy = min(bytes, iov->iov_len - skip);
-	}
-	/* Too bad - revert to non-atomic kmap */
-
-	kaddr = kmap(page);
-	to = kaddr + offset;
-	left = copyin(to, buf, copy);
-	copy -= left;
-	skip += copy;
-	to += copy;
-	bytes -= copy;
-	while (unlikely(!left && bytes)) {
-		iov++;
-		buf = iov->iov_base;
-		copy = min(bytes, iov->iov_len);
-		left = copyin(to, buf, copy);
-		copy -= left;
-		skip = copy;
-		to += copy;
-		bytes -= copy;
-	}
-	kunmap(page);
-
-done:
-	if (skip == iov->iov_len) {
-		iov++;
-		skip = 0;
-	}
-	i->count -= wanted - bytes;
-	i->nr_segs -= iov - i->iov;
-	i->iov = iov;
-	i->iov_offset = skip;
-	return wanted - bytes;
-}
-
 #ifdef PIPE_PARANOIA
 static bool sanity(const struct iov_iter *i)
 {
@@ -848,24 +680,14 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
-	if (likely(iter_is_iovec(i)))
-		return copy_page_to_iter_iovec(page, offset, bytes, i);
-	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
+		return copy_page_to_iter_pipe(page, offset, bytes, i);
+	} else {
 		void *kaddr = kmap_local_page(page);
 		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
 		kunmap_local(kaddr);
 		return wanted;
 	}
-	if (iov_iter_is_pipe(i))
-		return copy_page_to_iter_pipe(page, offset, bytes, i);
-	if (unlikely(iov_iter_is_discard(i))) {
-		if (unlikely(i->count < bytes))
-			bytes = i->count;
-		i->count -= bytes;
-		return bytes;
-	}
-	WARN_ON(1);
-	return 0;
 }
 
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
@@ -896,17 +718,12 @@ EXPORT_SYMBOL(copy_page_to_iter);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
-	if (unlikely(!page_copy_sane(page, offset, bytes)))
-		return 0;
-	if (likely(iter_is_iovec(i)))
-		return copy_page_from_iter_iovec(page, offset, bytes, i);
-	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+	if (page_copy_sane(page, offset, bytes)) {
 		void *kaddr = kmap_local_page(page);
 		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
 		kunmap_local(kaddr);
 		return wanted;
 	}
-	WARN_ON(1);
 	return 0;
 }
 EXPORT_SYMBOL(copy_page_from_iter);
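
On the commit message's point that the maximal kmap footprint increases only by one: copy_page_to_iter()/copy_page_from_iter() now take a single kmap_local_page() on the page they are handed, so a caller that already holds a local mapping only deepens the per-CPU kmap_local stack by one extra slot for the duration of the call. A hypothetical caller, purely to illustrate the nesting (fill_page_from_iter() is not an existing kernel function):

/* Hypothetical example - fill_page_from_iter() does not exist upstream. */
static size_t fill_page_from_iter(struct page *page, size_t len,
				  struct iov_iter *from)
{
	void *addr = kmap_local_page(page);	/* caller's mapping: depth 1 */
	size_t copied;

	memset(addr, 0, PAGE_SIZE);
	/* copy_page_from_iter() maps @page again internally: depth 2 at most */
	copied = copy_page_from_iter(page, 0, len, from);
	kunmap_local(addr);			/* caller's mapping released */
	return copied;
}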