From f6ef7b7bb285c7b63a36f370b40b990891a9aa1f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 16:22:29 +0200 Subject: [PATCH 01/14] proc: remove a level of indentation in proc_get_inode Just return early on inode allocation failure. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/proc/inode.c | 68 +++++++++++++++++++++++++------------------------ 1 file changed, 35 insertions(+), 33 deletions(-) diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 28d6105e908e..016b1302cbab 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -619,42 +619,44 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) { struct inode *inode = new_inode(sb); - if (inode) { - inode->i_ino = de->low_ino; - inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); - PROC_I(inode)->pde = de; + if (!inode) { + pde_put(de); + return NULL; + } - if (is_empty_pde(de)) { - make_empty_dir_inode(inode); - return inode; - } - if (de->mode) { - inode->i_mode = de->mode; - inode->i_uid = de->uid; - inode->i_gid = de->gid; - } - if (de->size) - inode->i_size = de->size; - if (de->nlink) - set_nlink(inode, de->nlink); + inode->i_ino = de->low_ino; + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); + PROC_I(inode)->pde = de; + if (is_empty_pde(de)) { + make_empty_dir_inode(inode); + return inode; + } - if (S_ISREG(inode->i_mode)) { - inode->i_op = de->proc_iops; - inode->i_fop = &proc_reg_file_ops; + if (de->mode) { + inode->i_mode = de->mode; + inode->i_uid = de->uid; + inode->i_gid = de->gid; + } + if (de->size) + inode->i_size = de->size; + if (de->nlink) + set_nlink(inode, de->nlink); + + if (S_ISREG(inode->i_mode)) { + inode->i_op = de->proc_iops; + inode->i_fop = &proc_reg_file_ops; #ifdef CONFIG_COMPAT - if (!de->proc_ops->proc_compat_ioctl) { - inode->i_fop = &proc_reg_file_ops_no_compat; - } + if (!de->proc_ops->proc_compat_ioctl) + inode->i_fop = &proc_reg_file_ops_no_compat; #endif - } else if (S_ISDIR(inode->i_mode)) { - inode->i_op = de->proc_iops; - inode->i_fop = de->proc_dir_ops; - } else if (S_ISLNK(inode->i_mode)) { - inode->i_op = de->proc_iops; - inode->i_fop = NULL; - } else - BUG(); - } else - pde_put(de); + } else if (S_ISDIR(inode->i_mode)) { + inode->i_op = de->proc_iops; + inode->i_fop = de->proc_dir_ops; + } else if (S_ISLNK(inode->i_mode)) { + inode->i_op = de->proc_iops; + inode->i_fop = NULL; + } else { + BUG(); + } return inode; } From 906146f44922392f00293e52ea9b61168b75ac62 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 16:22:30 +0200 Subject: [PATCH 02/14] proc: cleanup the compat vs no compat file ops Instead of providing a special no-compat version provide a special compat version for operations with ->compat_ioctl. 
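As a purely illustrative sketch (not part of this patch; the "demo" names are invented), a proc entry that wants compat ioctl handling now only has to fill in ->proc_compat_ioctl, and the proc_get_inode() change below picks the proc_reg_file_ops_compat table for it:

static long demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        /* both native and compat callers end up here */
        return 0;
}

static const struct proc_ops demo_proc_ops = {
        .proc_lseek             = default_llseek,
        .proc_ioctl             = demo_ioctl,
#ifdef CONFIG_COMPAT
        .proc_compat_ioctl      = demo_ioctl,
#endif
};

Entries that leave ->proc_compat_ioctl unset keep the plain proc_reg_file_ops table, which simply has no ->compat_ioctl method.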
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/proc/inode.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 016b1302cbab..93dd20457375 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -572,9 +572,6 @@ static const struct file_operations proc_reg_file_ops = { .write = proc_reg_write, .poll = proc_reg_poll, .unlocked_ioctl = proc_reg_unlocked_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = proc_reg_compat_ioctl, -#endif .mmap = proc_reg_mmap, .get_unmapped_area = proc_reg_get_unmapped_area, .open = proc_reg_open, @@ -582,12 +579,13 @@ static const struct file_operations proc_reg_file_ops = { }; #ifdef CONFIG_COMPAT -static const struct file_operations proc_reg_file_ops_no_compat = { +static const struct file_operations proc_reg_file_ops_compat = { .llseek = proc_reg_llseek, .read = proc_reg_read, .write = proc_reg_write, .poll = proc_reg_poll, .unlocked_ioctl = proc_reg_unlocked_ioctl, + .compat_ioctl = proc_reg_compat_ioctl, .mmap = proc_reg_mmap, .get_unmapped_area = proc_reg_get_unmapped_area, .open = proc_reg_open, @@ -646,8 +644,8 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) inode->i_op = de->proc_iops; inode->i_fop = &proc_reg_file_ops; #ifdef CONFIG_COMPAT - if (!de->proc_ops->proc_compat_ioctl) - inode->i_fop = &proc_reg_file_ops_no_compat; + if (de->proc_ops->proc_compat_ioctl) + inode->i_fop = &proc_reg_file_ops_compat; #endif } else if (S_ISDIR(inode->i_mode)) { inode->i_op = de->proc_iops; From fd5a13f4893c8df2a5a3af8599adecb52d05fe89 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 16:22:31 +0200 Subject: [PATCH 03/14] proc: add a read_iter method to proc proc_ops This will allow proc files to implement iter read semantics. 
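For illustration only (this example is not part of the series; the "demo" entry, message and names are invented), a proc file implementing the new hook could look roughly like this:

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/uio.h>

static const char demo_msg[] = "hello from read_iter\n";

static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        loff_t pos = iocb->ki_pos;
        size_t copied;

        if (pos < 0 || pos >= sizeof(demo_msg) - 1)
                return 0;
        /* copy into whatever the iov_iter describes: user iovec or kvec */
        copied = copy_to_iter(demo_msg + pos, sizeof(demo_msg) - 1 - pos, iter);
        iocb->ki_pos += copied;
        return copied;
}

static const struct proc_ops demo_proc_ops = {
        .proc_read_iter = demo_read_iter,
        .proc_lseek     = default_llseek,
};

proc_get_inode() then selects proc_iter_file_ops (or its compat variant) for such an entry, so reads go through ->read_iter end to end.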
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/proc/inode.c | 53 ++++++++++++++++++++++++++++++++++++++--- include/linux/proc_fs.h | 1 + 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 93dd20457375..58c075e2a452 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -297,6 +297,21 @@ static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence) return rv; } +static ssize_t proc_reg_read_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + struct proc_dir_entry *pde = PDE(file_inode(iocb->ki_filp)); + ssize_t ret; + + if (pde_is_permanent(pde)) + return pde->proc_ops->proc_read_iter(iocb, iter); + + if (!use_pde(pde)) + return -EIO; + ret = pde->proc_ops->proc_read_iter(iocb, iter); + unuse_pde(pde); + return ret; +} + static ssize_t pde_read(struct proc_dir_entry *pde, struct file *file, char __user *buf, size_t count, loff_t *ppos) { typeof_member(struct proc_ops, proc_read) read; @@ -578,6 +593,18 @@ static const struct file_operations proc_reg_file_ops = { .release = proc_reg_release, }; +static const struct file_operations proc_iter_file_ops = { + .llseek = proc_reg_llseek, + .read_iter = proc_reg_read_iter, + .write = proc_reg_write, + .poll = proc_reg_poll, + .unlocked_ioctl = proc_reg_unlocked_ioctl, + .mmap = proc_reg_mmap, + .get_unmapped_area = proc_reg_get_unmapped_area, + .open = proc_reg_open, + .release = proc_reg_release, +}; + #ifdef CONFIG_COMPAT static const struct file_operations proc_reg_file_ops_compat = { .llseek = proc_reg_llseek, @@ -591,6 +618,19 @@ static const struct file_operations proc_reg_file_ops_compat = { .open = proc_reg_open, .release = proc_reg_release, }; + +static const struct file_operations proc_iter_file_ops_compat = { + .llseek = proc_reg_llseek, + .read_iter = proc_reg_read_iter, + .write = proc_reg_write, + .poll = proc_reg_poll, + .unlocked_ioctl = proc_reg_unlocked_ioctl, + .compat_ioctl = proc_reg_compat_ioctl, + .mmap = proc_reg_mmap, + .get_unmapped_area = proc_reg_get_unmapped_area, + .open = proc_reg_open, + .release = proc_reg_release, +}; #endif static void proc_put_link(void *p) @@ -642,10 +682,17 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) if (S_ISREG(inode->i_mode)) { inode->i_op = de->proc_iops; - inode->i_fop = &proc_reg_file_ops; + if (de->proc_ops->proc_read_iter) + inode->i_fop = &proc_iter_file_ops; + else + inode->i_fop = &proc_reg_file_ops; #ifdef CONFIG_COMPAT - if (de->proc_ops->proc_compat_ioctl) - inode->i_fop = &proc_reg_file_ops_compat; + if (de->proc_ops->proc_compat_ioctl) { + if (de->proc_ops->proc_read_iter) + inode->i_fop = &proc_iter_file_ops_compat; + else + inode->i_fop = &proc_reg_file_ops_compat; + } #endif } else if (S_ISDIR(inode->i_mode)) { inode->i_op = de->proc_iops; diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 2df965cd0974..270cab43ca3d 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -30,6 +30,7 @@ struct proc_ops { unsigned int proc_flags; int (*proc_open)(struct inode *, struct file *); ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *); + ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *); loff_t (*proc_lseek)(struct file *, loff_t, int); int (*proc_release)(struct inode *, struct file *); From 4bd6a7353ee14697fea645e941354976d2c4a452 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 3 Sep 2020 16:22:32 +0200 
Subject: [PATCH 04/14] sysctl: Convert to iter interfaces Using the read_iter/write_iter interfaces allows for in-kernel users to set sysctls without using set_fs(). Also, the buffer is a string, so give it the real type of 'char *', not void *. [AV: Christoph's fixup folded in] Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/proc/proc_sysctl.c | 48 +++++++++++++++++++------------------- include/linux/bpf-cgroup.h | 2 +- kernel/bpf/cgroup.c | 2 +- 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 6c1166ccdaea..317899222d7f 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -540,13 +541,14 @@ out: return err; } -static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf, - size_t count, loff_t *ppos, int write) +static ssize_t proc_sys_call_handler(struct kiocb *iocb, struct iov_iter *iter, + int write) { - struct inode *inode = file_inode(filp); + struct inode *inode = file_inode(iocb->ki_filp); struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; - void *kbuf; + size_t count = iov_iter_count(iter); + char *kbuf; ssize_t error; if (IS_ERR(head)) @@ -569,32 +571,30 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf, error = -ENOMEM; if (count >= KMALLOC_MAX_SIZE) goto out; + kbuf = kzalloc(count + 1, GFP_KERNEL); + if (!kbuf) + goto out; if (write) { - kbuf = memdup_user_nul(ubuf, count); - if (IS_ERR(kbuf)) { - error = PTR_ERR(kbuf); - goto out; - } - } else { - kbuf = kzalloc(count, GFP_KERNEL); - if (!kbuf) - goto out; + error = -EFAULT; + if (!copy_from_iter_full(kbuf, count, iter)) + goto out_free_buf; + kbuf[count] = '\0'; } error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, &kbuf, &count, - ppos); + &iocb->ki_pos); if (error) goto out_free_buf; /* careful: calling conventions are nasty here */ - error = table->proc_handler(table, write, kbuf, &count, ppos); + error = table->proc_handler(table, write, kbuf, &count, &iocb->ki_pos); if (error) goto out_free_buf; if (!write) { error = -EFAULT; - if (copy_to_user(ubuf, kbuf, count)) + if (copy_to_iter(kbuf, count, iter) < count) goto out_free_buf; } @@ -607,16 +607,14 @@ out: return error; } -static ssize_t proc_sys_read(struct file *filp, char __user *buf, - size_t count, loff_t *ppos) +static ssize_t proc_sys_read(struct kiocb *iocb, struct iov_iter *iter) { - return proc_sys_call_handler(filp, (void __user *)buf, count, ppos, 0); + return proc_sys_call_handler(iocb, iter, 0); } -static ssize_t proc_sys_write(struct file *filp, const char __user *buf, - size_t count, loff_t *ppos) +static ssize_t proc_sys_write(struct kiocb *iocb, struct iov_iter *iter) { - return proc_sys_call_handler(filp, (void __user *)buf, count, ppos, 1); + return proc_sys_call_handler(iocb, iter, 1); } static int proc_sys_open(struct inode *inode, struct file *filp) @@ -853,8 +851,10 @@ static int proc_sys_getattr(const struct path *path, struct kstat *stat, static const struct file_operations proc_sys_file_operations = { .open = proc_sys_open, .poll = proc_sys_poll, - .read = proc_sys_read, - .write = proc_sys_write, + .read_iter = proc_sys_read, + .write_iter = proc_sys_write, + .splice_read = generic_file_splice_read, + .splice_write = iter_file_splice_write, .llseek = default_llseek, }; diff --git a/include/linux/bpf-cgroup.h 
b/include/linux/bpf-cgroup.h index 64f367044e25..82b26a1386d8 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -136,7 +136,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, struct ctl_table *table, int write, - void **buf, size_t *pcount, loff_t *ppos, + char **buf, size_t *pcount, loff_t *ppos, enum bpf_attach_type type); int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 83ff127ef7ae..226299352a76 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1226,7 +1226,7 @@ const struct bpf_verifier_ops cg_dev_verifier_ops = { */ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, struct ctl_table *table, int write, - void **buf, size_t *pcount, loff_t *ppos, + char **buf, size_t *pcount, loff_t *ppos, enum bpf_attach_type type) { struct bpf_sysctl_kern ctx = { From 4d03e3cc59828c82ee89ea6e27a2f3cdf95aaadf Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 16:22:33 +0200 Subject: [PATCH 05/14] fs: don't allow kernel reads and writes without iter ops Don't allow calling ->read or ->write with set_fs as a preparation for killing off set_fs. All the instances that we use kernel_read/write on are using the iter ops already. If a file has both the regular ->read/->write methods and the iter variants those could have different semantics for messed up enough drivers. Also fails the kernel access to them in that case. Signed-off-by: Christoph Hellwig Reviewed-by: Kees Cook Signed-off-by: Al Viro --- fs/read_write.c | 67 +++++++++++++++++++++++++++++++------------------ 1 file changed, 42 insertions(+), 25 deletions(-) diff --git a/fs/read_write.c b/fs/read_write.c index 5db58b8c78d0..702c4301d9eb 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -419,27 +419,41 @@ static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo return ret; } +static int warn_unsupported(struct file *file, const char *op) +{ + pr_warn_ratelimited( + "kernel %s not supported for file %pD4 (pid: %d comm: %.20s)\n", + op, file, current->pid, current->comm); + return -EINVAL; +} + ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos) { - mm_segment_t old_fs = get_fs(); + struct kvec iov = { + .iov_base = buf, + .iov_len = min_t(size_t, count, MAX_RW_COUNT), + }; + struct kiocb kiocb; + struct iov_iter iter; ssize_t ret; if (WARN_ON_ONCE(!(file->f_mode & FMODE_READ))) return -EINVAL; if (!(file->f_mode & FMODE_CAN_READ)) return -EINVAL; + /* + * Also fail if ->read_iter and ->read are both wired up as that + * implies very convoluted semantics. 
+ */ + if (unlikely(!file->f_op->read_iter || file->f_op->read)) + return warn_unsupported(file, "read"); - if (count > MAX_RW_COUNT) - count = MAX_RW_COUNT; - set_fs(KERNEL_DS); - if (file->f_op->read) - ret = file->f_op->read(file, (void __user *)buf, count, pos); - else if (file->f_op->read_iter) - ret = new_sync_read(file, (void __user *)buf, count, pos); - else - ret = -EINVAL; - set_fs(old_fs); + init_sync_kiocb(&kiocb, file); + kiocb.ki_pos = *pos; + iov_iter_kvec(&iter, READ, &iov, 1, iov.iov_len); + ret = file->f_op->read_iter(&kiocb, &iter); if (ret > 0) { + *pos = kiocb.ki_pos; fsnotify_access(file); add_rchar(current, ret); } @@ -510,28 +524,31 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t /* caller is responsible for file_start_write/file_end_write */ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos) { - mm_segment_t old_fs; - const char __user *p; + struct kvec iov = { + .iov_base = (void *)buf, + .iov_len = min_t(size_t, count, MAX_RW_COUNT), + }; + struct kiocb kiocb; + struct iov_iter iter; ssize_t ret; if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE))) return -EBADF; if (!(file->f_mode & FMODE_CAN_WRITE)) return -EINVAL; + /* + * Also fail if ->write_iter and ->write are both wired up as that + * implies very convoluted semantics. + */ + if (unlikely(!file->f_op->write_iter || file->f_op->write)) + return warn_unsupported(file, "write"); - old_fs = get_fs(); - set_fs(KERNEL_DS); - p = (__force const char __user *)buf; - if (count > MAX_RW_COUNT) - count = MAX_RW_COUNT; - if (file->f_op->write) - ret = file->f_op->write(file, p, count, pos); - else if (file->f_op->write_iter) - ret = new_sync_write(file, p, count, pos); - else - ret = -EINVAL; - set_fs(old_fs); + init_sync_kiocb(&kiocb, file); + kiocb.ki_pos = *pos; + iov_iter_kvec(&iter, WRITE, &iov, 1, iov.iov_len); + ret = file->f_op->write_iter(&kiocb, &iter); if (ret > 0) { + *pos = kiocb.ki_pos; fsnotify_modify(file); add_wchar(current, ret); } From 36e2c7421f02a22f71c9283e55fdb672a9eb58e7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 16:22:34 +0200 Subject: [PATCH 06/14] fs: don't allow splice read/write without explicit ops default_file_splice_write is the last piece of generic code that uses set_fs to make the uaccess routines operate on kernel pointers. It implements a "fallback loop" for splicing from files that do not actually provide a proper splice_read method. The usual file systems and other high bandwidth instances all provide a ->splice_read, so this just removes support for various device drivers and procfs/debugfs files. If splice support for any of those turns out to be important it can be added back by switching them to the iter ops and using generic_file_splice_read. 
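A sketch of that re-enabling pattern (illustrative only; demo_read_iter/demo_write_iter stand in for a driver's own iter methods), mirroring what the sysctl conversion in patch 4 does:

static const struct file_operations demo_fops = {
        .owner          = THIS_MODULE,
        .llseek         = default_llseek,
        .read_iter      = demo_read_iter,       /* driver's own ->read_iter */
        .write_iter     = demo_write_iter,      /* driver's own ->write_iter */
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
};

generic_file_splice_read() is built on ->read_iter and iter_file_splice_write() on ->write_iter, so no per-driver splice code is needed beyond these two lines.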
Signed-off-by: Christoph Hellwig Reviewed-by: Kees Cook Signed-off-by: Al Viro --- fs/read_write.c | 2 +- fs/splice.c | 130 +++++---------------------------------------- include/linux/fs.h | 2 - 3 files changed, 15 insertions(+), 119 deletions(-) diff --git a/fs/read_write.c b/fs/read_write.c index 702c4301d9eb..8c61f67453e3 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1077,7 +1077,7 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos, } EXPORT_SYMBOL(vfs_iter_write); -ssize_t vfs_readv(struct file *file, const struct iovec __user *vec, +static ssize_t vfs_readv(struct file *file, const struct iovec __user *vec, unsigned long vlen, loff_t *pos, rwf_t flags) { struct iovec iovstack[UIO_FASTIOV]; diff --git a/fs/splice.c b/fs/splice.c index d7c8a7c4db07..412df7b48f9e 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -342,89 +342,6 @@ const struct pipe_buf_operations nosteal_pipe_buf_ops = { }; EXPORT_SYMBOL(nosteal_pipe_buf_ops); -static ssize_t kernel_readv(struct file *file, const struct kvec *vec, - unsigned long vlen, loff_t offset) -{ - mm_segment_t old_fs; - loff_t pos = offset; - ssize_t res; - - old_fs = get_fs(); - set_fs(KERNEL_DS); - /* The cast to a user pointer is valid due to the set_fs() */ - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos, 0); - set_fs(old_fs); - - return res; -} - -static ssize_t default_file_splice_read(struct file *in, loff_t *ppos, - struct pipe_inode_info *pipe, size_t len, - unsigned int flags) -{ - struct kvec *vec, __vec[PIPE_DEF_BUFFERS]; - struct iov_iter to; - struct page **pages; - unsigned int nr_pages; - unsigned int mask; - size_t offset, base, copied = 0; - ssize_t res; - int i; - - if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) - return -EAGAIN; - - /* - * Try to keep page boundaries matching to source pagecache ones - - * it probably won't be much help, but... - */ - offset = *ppos & ~PAGE_MASK; - - iov_iter_pipe(&to, READ, pipe, len + offset); - - res = iov_iter_get_pages_alloc(&to, &pages, len + offset, &base); - if (res <= 0) - return -ENOMEM; - - nr_pages = DIV_ROUND_UP(res + base, PAGE_SIZE); - - vec = __vec; - if (nr_pages > PIPE_DEF_BUFFERS) { - vec = kmalloc_array(nr_pages, sizeof(struct kvec), GFP_KERNEL); - if (unlikely(!vec)) { - res = -ENOMEM; - goto out; - } - } - - mask = pipe->ring_size - 1; - pipe->bufs[to.head & mask].offset = offset; - pipe->bufs[to.head & mask].len -= offset; - - for (i = 0; i < nr_pages; i++) { - size_t this_len = min_t(size_t, len, PAGE_SIZE - offset); - vec[i].iov_base = page_address(pages[i]) + offset; - vec[i].iov_len = this_len; - len -= this_len; - offset = 0; - } - - res = kernel_readv(in, vec, nr_pages, *ppos); - if (res > 0) { - copied = res; - *ppos += res; - } - - if (vec != __vec) - kfree(vec); -out: - for (i = 0; i < nr_pages; i++) - put_page(pages[i]); - kvfree(pages); - iov_iter_advance(&to, copied); /* truncates and discards */ - return res; -} - /* * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos' * using sendpage(). Return the number of bytes sent. 
@@ -788,33 +705,6 @@ done: EXPORT_SYMBOL(iter_file_splice_write); -static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf, - struct splice_desc *sd) -{ - int ret; - void *data; - loff_t tmp = sd->pos; - - data = kmap(buf->page); - ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp); - kunmap(buf->page); - - return ret; -} - -static ssize_t default_file_splice_write(struct pipe_inode_info *pipe, - struct file *out, loff_t *ppos, - size_t len, unsigned int flags) -{ - ssize_t ret; - - ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf); - if (ret > 0) - *ppos += ret; - - return ret; -} - /** * generic_splice_sendpage - splice data from a pipe to a socket * @pipe: pipe to splice from @@ -836,15 +726,23 @@ ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, EXPORT_SYMBOL(generic_splice_sendpage); +static int warn_unsupported(struct file *file, const char *op) +{ + pr_debug_ratelimited( + "splice %s not supported for file %pD4 (pid: %d comm: %.20s)\n", + op, file, current->pid, current->comm); + return -EINVAL; +} + /* * Attempt to initiate a splice from pipe to file. */ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { - if (out->f_op->splice_write) - return out->f_op->splice_write(pipe, out, ppos, len, flags); - return default_file_splice_write(pipe, out, ppos, len, flags); + if (unlikely(!out->f_op->splice_write)) + return warn_unsupported(out, "write"); + return out->f_op->splice_write(pipe, out, ppos, len, flags); } /* @@ -866,9 +764,9 @@ static long do_splice_to(struct file *in, loff_t *ppos, if (unlikely(len > MAX_RW_COUNT)) len = MAX_RW_COUNT; - if (in->f_op->splice_read) - return in->f_op->splice_read(in, ppos, pipe, len, flags); - return default_file_splice_read(in, ppos, pipe, len, flags); + if (unlikely(!in->f_op->splice_read)) + return warn_unsupported(in, "read"); + return in->f_op->splice_read(in, ppos, pipe, len, flags); } /** diff --git a/include/linux/fs.h b/include/linux/fs.h index e019ea2f1347..d33cc3e8ed41 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1894,8 +1894,6 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); -extern ssize_t vfs_readv(struct file *, const struct iovec __user *, - unsigned long, loff_t *, rwf_t); extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, loff_t, size_t, unsigned int); extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in, From 5e6e9852d6f76e01b2e6803c74258afa5b432bc5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 16:22:35 +0200 Subject: [PATCH 07/14] uaccess: add infrastructure for kernel builds with set_fs() Add a CONFIG_SET_FS option that is selected by architectures that implement set_fs, which is all of them initially. If the option is not set, stubs for routines related to overriding the address space are provided so that architectures can start to opt out of providing set_fs. 
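To make the intended migration concrete, a hedged sketch (the helper name is invented) of what replaces the old set_fs(KERNEL_DS)-around-vfs_read() pattern in kernel code: with patch 5 in place, kernel_read() goes through ->read_iter with a kvec-backed iov_iter, so no address-limit games are required.

#include <linux/fs.h>

static ssize_t demo_load_blob(struct file *file, void *buf, size_t size)
{
        loff_t pos = 0;

        /* reads into a kernel buffer without touching the address limit */
        return kernel_read(file, buf, size, &pos);
}

Once the remaining set_fs() callers relevant to an architecture are gone, it can drop its "select SET_FS" line and the stub mm_segment_t definitions added here take effect.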
Signed-off-by: Christoph Hellwig Reviewed-by: Kees Cook Signed-off-by: Al Viro --- arch/Kconfig | 3 +++ arch/alpha/Kconfig | 1 + arch/arc/Kconfig | 1 + arch/arm/Kconfig | 1 + arch/arm64/Kconfig | 1 + arch/c6x/Kconfig | 1 + arch/csky/Kconfig | 1 + arch/h8300/Kconfig | 1 + arch/hexagon/Kconfig | 1 + arch/ia64/Kconfig | 1 + arch/m68k/Kconfig | 1 + arch/microblaze/Kconfig | 1 + arch/mips/Kconfig | 1 + arch/nds32/Kconfig | 1 + arch/nios2/Kconfig | 1 + arch/openrisc/Kconfig | 1 + arch/parisc/Kconfig | 1 + arch/powerpc/Kconfig | 1 + arch/riscv/Kconfig | 1 + arch/s390/Kconfig | 1 + arch/sh/Kconfig | 1 + arch/sparc/Kconfig | 1 + arch/um/Kconfig | 1 + arch/x86/Kconfig | 1 + arch/xtensa/Kconfig | 1 + include/linux/uaccess.h | 18 ++++++++++++++++++ 26 files changed, 45 insertions(+) diff --git a/arch/Kconfig b/arch/Kconfig index af14a567b493..3fab619a6aa5 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -24,6 +24,9 @@ config KEXEC_ELF config HAVE_IMA_KEXEC bool +config SET_FS + bool + config HOTPLUG_SMT bool diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 9c5f06e8eb9b..d6e9fc7a7b19 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -39,6 +39,7 @@ config ALPHA select OLD_SIGSUSPEND select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67 select MMU_GATHER_NO_RANGE + select SET_FS help The Alpha is a 64-bit general-purpose processor designed and marketed by the Digital Equipment Corporation of blessed memory, diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index ba00c4e1e1c2..c49f5754a11e 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -48,6 +48,7 @@ config ARC select PCI_SYSCALL if PCI select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32 + select SET_FS config ARCH_HAS_CACHE_LINE_SIZE def_bool y diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e00d94b16658..87e1478a42dc 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -118,6 +118,7 @@ config ARM select PCI_SYSCALL if PCI select PERF_USE_VMALLOC select RTC_LIB + select SET_FS select SYS_SUPPORTS_APM_EMULATION # Above selects are sorted alphabetically; please add new ones # according to that. Thanks. 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6d232837cbee..fbd9e35bef09 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -192,6 +192,7 @@ config ARM64 select PCI_SYSCALL if PCI select POWER_RESET select POWER_SUPPLY + select SET_FS select SPARSE_IRQ select SWIOTLB select SYSCTL_EXCEPTION_TRACE diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index 6444ebfd06a6..48d66bf0465d 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -22,6 +22,7 @@ config C6X select GENERIC_CLOCKEVENTS select MODULES_USE_ELF_RELA select MMU_GATHER_NO_RANGE if MMU + select SET_FS config MMU def_bool n diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 3d5afb5f5685..2836f6e76fdb 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -78,6 +78,7 @@ config CSKY select PCI_DOMAINS_GENERIC if PCI select PCI_SYSCALL if PCI select PCI_MSI if PCI + select SET_FS config LOCKDEP_SUPPORT def_bool y diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index d11666d538fe..7945de067e9f 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -25,6 +25,7 @@ config H8300 select HAVE_ARCH_KGDB select HAVE_ARCH_HASH select CPU_NO_EFFICIENT_FFS + select SET_FS select UACCESS_MEMCPY config CPU_BIG_ENDIAN diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 667cfc511cf9..f2afabbadd43 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -31,6 +31,7 @@ config HEXAGON select GENERIC_CLOCKEVENTS_BROADCAST select MODULES_USE_ELF_RELA select GENERIC_CPU_DEVICES + select SET_FS help Qualcomm Hexagon is a processor architecture designed for high performance and low power across a wide variety of applications. diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 5b4ec80bf586..22a6853840e2 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -56,6 +56,7 @@ config IA64 select NEED_DMA_MAP_STATE select NEED_SG_DMA_LENGTH select NUMA if !FLATMEM + select SET_FS default y help The Itanium Processor Family is Intel's 64-bit successor to diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 6f2f38d05772..dcf4ae8c9b21 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -32,6 +32,7 @@ config M68K select OLD_SIGSUSPEND3 select OLD_SIGACTION select MMU_GATHER_NO_RANGE if MMU + select SET_FS config CPU_BIG_ENDIAN def_bool y diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index d262ac0c8714..7e3d4583abf3 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -46,6 +46,7 @@ config MICROBLAZE select CPU_NO_EFFICIENT_FFS select MMU_GATHER_NO_RANGE if MMU select SPARSE_IRQ + select SET_FS # Endianness selection choice diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index c95fa3a2484c..fbc26391b588 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -87,6 +87,7 @@ config MIPS select MODULES_USE_ELF_RELA if MODULES && 64BIT select PERF_USE_VMALLOC select RTC_LIB + select SET_FS select SYSCTL_EXCEPTION_TRACE select VIRT_TO_BUS diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index e30298e99e1b..e8e541fd2267 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -48,6 +48,7 @@ config NDS32 select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE + select SET_FS help Andes(nds32) Linux support. 
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index c6645141bb2a..c7c6ba6bec9d 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -27,6 +27,7 @@ config NIOS2 select USB_ARCH_HAS_HCD if USB_SUPPORT select CPU_NO_EFFICIENT_FFS select MMU_GATHER_NO_RANGE if MMU + select SET_FS config GENERIC_CSUM def_bool y diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 7e94fe37cb2f..6233c6293180 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -39,6 +39,7 @@ config OPENRISC select ARCH_WANT_FRAME_POINTERS select GENERIC_IRQ_MULTI_HANDLER select MMU_GATHER_NO_RANGE if MMU + select SET_FS config CPU_BIG_ENDIAN def_bool y diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 3b0f53dd70bc..be70af482b5a 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -63,6 +63,7 @@ config PARISC select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE select HAVE_KPROBES_ON_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS + select SET_FS help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 1f48bbfb3ce9..3f09d6fdf894 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -249,6 +249,7 @@ config PPC select PCI_SYSCALL if PCI select PPC_DAWR if PPC64 select RTC_LIB + select SET_FS select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 7b5905529146..07d53044013e 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -86,6 +86,7 @@ config RISCV select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK + select SET_FS config ARCH_MMAP_RND_BITS_MIN default 18 if 64BIT diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 3d86e12e8e3c..fd81385a7787 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -185,6 +185,7 @@ config S390 select OLD_SIGSUSPEND3 select PCI_DOMAINS if PCI select PCI_MSI if PCI + select SET_FS select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index d20927128fce..2bd1653f3b3f 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -71,6 +71,7 @@ config SUPERH select PERF_EVENTS select PERF_USE_VMALLOC select RTC_LIB + select SET_FS select SPARSE_IRQ help The SuperH is a RISC processor targeted for use in embedded systems diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index efeff2c896a5..3e0cf0319a27 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -49,6 +49,7 @@ config SPARC select LOCKDEP_SMALL if LOCKDEP select NEED_DMA_MAP_STATE select NEED_SG_DMA_LENGTH + select SET_FS config SPARC32 def_bool !64BIT diff --git a/arch/um/Kconfig b/arch/um/Kconfig index eb51fec75948..3aefcd815668 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -19,6 +19,7 @@ config UML select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS select HAVE_GCC_PLUGINS + select SET_FS select TTY # Needed for line.c config MMU diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7101ac64bb20..f85c13355732 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -237,6 +237,7 @@ config X86 select HAVE_ARCH_KCSAN if X86_64 select X86_FEATURE_NAMES if PROC_FS select PROC_PID_ARCH_STATUS if PROC_FS + select SET_FS imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI config INSTRUCTION_DECODER diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index e997e0119c02..94bad4d66b4b 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -41,6 +41,7 @@ config XTENSA select IRQ_DOMAIN select MODULES_USE_ELF_RELA 
select PERF_USE_VMALLOC + select SET_FS select VIRT_TO_BUS help Xtensa processors are 32-bit RISC machines designed by Tensilica diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 94b285411659..70073c802b48 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -8,6 +8,7 @@ #include +#ifdef CONFIG_SET_FS /* * Force the uaccess routines to be wired up for actual userspace access, * overriding any possible set_fs(KERNEL_DS) still lingering around. Undone @@ -25,6 +26,23 @@ static inline void force_uaccess_end(mm_segment_t oldfs) { set_fs(oldfs); } +#else /* CONFIG_SET_FS */ +typedef struct { + /* empty dummy */ +} mm_segment_t; + +#define uaccess_kernel() (false) +#define user_addr_max() (TASK_SIZE_MAX) + +static inline mm_segment_t force_uaccess_begin(void) +{ + return (mm_segment_t) { }; +} + +static inline void force_uaccess_end(mm_segment_t oldfs) +{ +} +#endif /* CONFIG_SET_FS */ /* * Architectures should provide two primitives (raw_copy_{to,from}_user()) From 81b1e242b8bdc1f5094b8d172cdc0c032fc761c1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 16:22:36 +0200 Subject: [PATCH 08/14] test_bitmap: remove user bitmap tests We can't run the tests for userspace bitmap parsing if set_fs() doesn't exist, and it is about to go away for x86, powerpc with other major architectures to follow. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- lib/test_bitmap.c | 91 +++++++++++------------------------------------ 1 file changed, 21 insertions(+), 70 deletions(-) diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index df903c53952b..4425a1dd4ef1 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c @@ -354,50 +354,37 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = { }; -static void __init __test_bitmap_parselist(int is_user) +static void __init test_bitmap_parselist(void) { int i; int err; ktime_t time; DECLARE_BITMAP(bmap, 2048); - char *mode = is_user ? 
"_user" : ""; for (i = 0; i < ARRAY_SIZE(parselist_tests); i++) { #define ptest parselist_tests[i] - if (is_user) { - mm_segment_t orig_fs = get_fs(); - size_t len = strlen(ptest.in); - - set_fs(KERNEL_DS); - time = ktime_get(); - err = bitmap_parselist_user((__force const char __user *)ptest.in, len, - bmap, ptest.nbits); - time = ktime_get() - time; - set_fs(orig_fs); - } else { - time = ktime_get(); - err = bitmap_parselist(ptest.in, bmap, ptest.nbits); - time = ktime_get() - time; - } + time = ktime_get(); + err = bitmap_parselist(ptest.in, bmap, ptest.nbits); + time = ktime_get() - time; if (err != ptest.errno) { - pr_err("parselist%s: %d: input is %s, errno is %d, expected %d\n", - mode, i, ptest.in, err, ptest.errno); + pr_err("parselist: %d: input is %s, errno is %d, expected %d\n", + i, ptest.in, err, ptest.errno); continue; } if (!err && ptest.expected && !__bitmap_equal(bmap, ptest.expected, ptest.nbits)) { - pr_err("parselist%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n", - mode, i, ptest.in, bmap[0], + pr_err("parselist: %d: input is %s, result is 0x%lx, expected 0x%lx\n", + i, ptest.in, bmap[0], *ptest.expected); continue; } if (ptest.flags & PARSE_TIME) - pr_err("parselist%s: %d: input is '%s' OK, Time: %llu\n", - mode, i, ptest.in, time); + pr_err("parselist: %d: input is '%s' OK, Time: %llu\n", + i, ptest.in, time); #undef ptest } @@ -443,75 +430,41 @@ static const struct test_bitmap_parselist parse_tests[] __initconst = { #undef step }; -static void __init __test_bitmap_parse(int is_user) +static void __init test_bitmap_parse(void) { int i; int err; ktime_t time; DECLARE_BITMAP(bmap, 2048); - char *mode = is_user ? "_user" : ""; for (i = 0; i < ARRAY_SIZE(parse_tests); i++) { struct test_bitmap_parselist test = parse_tests[i]; + size_t len = test.flags & NO_LEN ? UINT_MAX : strlen(test.in); - if (is_user) { - size_t len = strlen(test.in); - mm_segment_t orig_fs = get_fs(); - - set_fs(KERNEL_DS); - time = ktime_get(); - err = bitmap_parse_user((__force const char __user *)test.in, len, - bmap, test.nbits); - time = ktime_get() - time; - set_fs(orig_fs); - } else { - size_t len = test.flags & NO_LEN ? 
- UINT_MAX : strlen(test.in); - time = ktime_get(); - err = bitmap_parse(test.in, len, bmap, test.nbits); - time = ktime_get() - time; - } + time = ktime_get(); + err = bitmap_parse(test.in, len, bmap, test.nbits); + time = ktime_get() - time; if (err != test.errno) { - pr_err("parse%s: %d: input is %s, errno is %d, expected %d\n", - mode, i, test.in, err, test.errno); + pr_err("parse: %d: input is %s, errno is %d, expected %d\n", + i, test.in, err, test.errno); continue; } if (!err && test.expected && !__bitmap_equal(bmap, test.expected, test.nbits)) { - pr_err("parse%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n", - mode, i, test.in, bmap[0], + pr_err("parse: %d: input is %s, result is 0x%lx, expected 0x%lx\n", + i, test.in, bmap[0], *test.expected); continue; } if (test.flags & PARSE_TIME) - pr_err("parse%s: %d: input is '%s' OK, Time: %llu\n", - mode, i, test.in, time); + pr_err("parse: %d: input is '%s' OK, Time: %llu\n", + i, test.in, time); } } -static void __init test_bitmap_parselist(void) -{ - __test_bitmap_parselist(0); -} - -static void __init test_bitmap_parselist_user(void) -{ - __test_bitmap_parselist(1); -} - -static void __init test_bitmap_parse(void) -{ - __test_bitmap_parse(0); -} - -static void __init test_bitmap_parse_user(void) -{ - __test_bitmap_parse(1); -} - #define EXP1_IN_BITS (sizeof(exp1) * 8) static void __init test_bitmap_arr32(void) @@ -675,9 +628,7 @@ static void __init selftest(void) test_replace(); test_bitmap_arr32(); test_bitmap_parse(); - test_bitmap_parse_user(); test_bitmap_parselist(); - test_bitmap_parselist_user(); test_mem_optimisations(); test_for_each_set_clump8(); test_bitmap_cut(); From c6f7c753f71cccb21d1a7f0c1127ce7804374c5e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 16:22:37 +0200 Subject: [PATCH 09/14] lkdtm: remove set_fs-based tests Once we can't manipulate the address limit, we also can't test what happens when the manipulation is abused. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- drivers/misc/lkdtm/bugs.c | 10 ---------- drivers/misc/lkdtm/core.c | 2 -- drivers/misc/lkdtm/lkdtm.h | 2 -- drivers/misc/lkdtm/usercopy.c | 15 --------------- tools/testing/selftests/lkdtm/tests.txt | 2 -- 5 files changed, 31 deletions(-) diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index 4dfbfd51bdf7..a0675d4154d2 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -312,16 +312,6 @@ void lkdtm_CORRUPT_LIST_DEL(void) pr_err("list_del() corruption not detected!\n"); } -/* Test if unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */ -void lkdtm_CORRUPT_USER_DS(void) -{ - pr_info("setting bad task size limit\n"); - set_fs(KERNEL_DS); - - /* Make sure we do not keep running with a KERNEL_DS! 
*/ - force_sig(SIGKILL); -} - /* Test that VMAP_STACK is actually allocating with a leading guard page */ void lkdtm_STACK_GUARD_PAGE_LEADING(void) { diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index a5e344df9166..97803f213d9d 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -112,7 +112,6 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(CORRUPT_STACK_STRONG), CRASHTYPE(CORRUPT_LIST_ADD), CRASHTYPE(CORRUPT_LIST_DEL), - CRASHTYPE(CORRUPT_USER_DS), CRASHTYPE(STACK_GUARD_PAGE_LEADING), CRASHTYPE(STACK_GUARD_PAGE_TRAILING), CRASHTYPE(UNSET_SMEP), @@ -172,7 +171,6 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(USERCOPY_STACK_FRAME_FROM), CRASHTYPE(USERCOPY_STACK_BEYOND), CRASHTYPE(USERCOPY_KERNEL), - CRASHTYPE(USERCOPY_KERNEL_DS), CRASHTYPE(STACKLEAK_ERASING), CRASHTYPE(CFI_FORWARD_PROTO), #ifdef CONFIG_X86_32 diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h index 8878538b2c13..6dec4c9b442f 100644 --- a/drivers/misc/lkdtm/lkdtm.h +++ b/drivers/misc/lkdtm/lkdtm.h @@ -27,7 +27,6 @@ void lkdtm_OVERFLOW_UNSIGNED(void); void lkdtm_ARRAY_BOUNDS(void); void lkdtm_CORRUPT_LIST_ADD(void); void lkdtm_CORRUPT_LIST_DEL(void); -void lkdtm_CORRUPT_USER_DS(void); void lkdtm_STACK_GUARD_PAGE_LEADING(void); void lkdtm_STACK_GUARD_PAGE_TRAILING(void); void lkdtm_UNSET_SMEP(void); @@ -96,7 +95,6 @@ void lkdtm_USERCOPY_STACK_FRAME_TO(void); void lkdtm_USERCOPY_STACK_FRAME_FROM(void); void lkdtm_USERCOPY_STACK_BEYOND(void); void lkdtm_USERCOPY_KERNEL(void); -void lkdtm_USERCOPY_KERNEL_DS(void); /* lkdtm_stackleak.c */ void lkdtm_STACKLEAK_ERASING(void); diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c index b833367a45d0..109e8d4302c1 100644 --- a/drivers/misc/lkdtm/usercopy.c +++ b/drivers/misc/lkdtm/usercopy.c @@ -325,21 +325,6 @@ free_user: vm_munmap(user_addr, PAGE_SIZE); } -void lkdtm_USERCOPY_KERNEL_DS(void) -{ - char __user *user_ptr = - (char __user *)(0xFUL << (sizeof(unsigned long) * 8 - 4)); - mm_segment_t old_fs = get_fs(); - char buf[10] = {0}; - - pr_info("attempting copy_to_user() to noncanonical address: %px\n", - user_ptr); - set_fs(KERNEL_DS); - if (copy_to_user(user_ptr, buf, sizeof(buf)) == 0) - pr_err("copy_to_user() to noncanonical address succeeded!?\n"); - set_fs(old_fs); -} - void __init lkdtm_usercopy_init(void) { /* Prepare cache that lacks SLAB_USERCOPY flag. */ diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt index 9d266e79c6a2..74a8d329a72c 100644 --- a/tools/testing/selftests/lkdtm/tests.txt +++ b/tools/testing/selftests/lkdtm/tests.txt @@ -9,7 +9,6 @@ EXCEPTION #CORRUPT_STACK_STRONG Crashes entire system on success CORRUPT_LIST_ADD list_add corruption CORRUPT_LIST_DEL list_del corruption -CORRUPT_USER_DS Invalid address limit on user-mode return STACK_GUARD_PAGE_LEADING STACK_GUARD_PAGE_TRAILING UNSET_SMEP CR4 bits went missing @@ -67,6 +66,5 @@ USERCOPY_STACK_FRAME_TO USERCOPY_STACK_FRAME_FROM USERCOPY_STACK_BEYOND USERCOPY_KERNEL -USERCOPY_KERNEL_DS STACKLEAK_ERASING OK: the rest of the thread stack is properly erased CFI_FORWARD_PROTO From 999c83e8ffd90caa00ee6caf054e037cf8a27d0e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 16:22:38 +0200 Subject: [PATCH 10/14] x86: move PAGE_OFFSET, TASK_SIZE & friends to page_{32,64}_types.h At least for 64-bit this moves them closer to some of the defines they are based on, and it prepares for using the TASK_SIZE_MAX definition from assembly. 
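For background on that last sentence (illustration only; DEMO_LIMIT is a made-up name): the assembler cannot parse C integer suffixes such as UL, so a constant that must be visible to both C and .S files is spelled with _AC(), which is what the following patch applies to TASK_SIZE_MAX.

#include <linux/const.h>

/* C-only form: the UL suffix breaks assembly users */
#define DEMO_LIMIT_C_ONLY       ((1UL << 47) - 4096)

/* shared form: _AC(1, UL) expands to 1UL in C and to plain 1 in .S files */
#define DEMO_LIMIT              ((_AC(1, UL) << 47) - 4096)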
Signed-off-by: Christoph Hellwig Reviewed-by: Kees Cook Signed-off-by: Al Viro --- arch/x86/include/asm/page_32_types.h | 11 +++++++ arch/x86/include/asm/page_64_types.h | 38 +++++++++++++++++++++ arch/x86/include/asm/processor.h | 49 ---------------------------- 3 files changed, 49 insertions(+), 49 deletions(-) diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index 565ad755c785..26236925fb2c 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h @@ -41,6 +41,17 @@ #define __VIRTUAL_MASK_SHIFT 32 #endif /* CONFIG_X86_PAE */ +/* + * User space process size: 3GB (default). + */ +#define IA32_PAGE_OFFSET PAGE_OFFSET +#define TASK_SIZE PAGE_OFFSET +#define TASK_SIZE_LOW TASK_SIZE +#define TASK_SIZE_MAX TASK_SIZE +#define DEFAULT_MAP_WINDOW TASK_SIZE +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP + /* * Kernel image size is limited to 512 MB (see in arch/x86/kernel/head_32.S) */ diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 288b065955b7..996595c9897e 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -58,6 +58,44 @@ #define __VIRTUAL_MASK_SHIFT 47 #endif +/* + * User space process size. This is the first address outside the user range. + * There are a few constraints that determine this: + * + * On Intel CPUs, if a SYSCALL instruction is at the highest canonical + * address, then that syscall will enter the kernel with a + * non-canonical return address, and SYSRET will explode dangerously. + * We avoid this particular problem by preventing anything executable + * from being mapped at the maximum canonical address. + * + * On AMD CPUs in the Ryzen family, there's a nasty bug in which the + * CPUs malfunction if they execute code from the highest canonical page. + * They'll speculate right off the end of the canonical space, and + * bad things happen. This is worked around in the same way as the + * Intel problem. + * + * With page table isolation enabled, we map the LDT in ... [stay tuned] + */ +#define TASK_SIZE_MAX ((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE) + +#define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE) + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ + 0xc0000000 : 0xFFFFe000) + +#define TASK_SIZE_LOW (test_thread_flag(TIF_ADDR32) ? \ + IA32_PAGE_OFFSET : DEFAULT_MAP_WINDOW) +#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \ + IA32_PAGE_OFFSET : TASK_SIZE_MAX) +#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \ + IA32_PAGE_OFFSET : TASK_SIZE_MAX) + +#define STACK_TOP TASK_SIZE_LOW +#define STACK_TOP_MAX TASK_SIZE_MAX + /* * Maximum kernel image size is limited to 1 GiB, due to the fixmap living * in the next 1 GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 97143d87994c..1618eeb08361 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -782,17 +782,6 @@ static inline void spin_lock_prefetch(const void *x) }) #ifdef CONFIG_X86_32 -/* - * User space process size: 3GB (default). 
- */ -#define IA32_PAGE_OFFSET PAGE_OFFSET -#define TASK_SIZE PAGE_OFFSET -#define TASK_SIZE_LOW TASK_SIZE -#define TASK_SIZE_MAX TASK_SIZE -#define DEFAULT_MAP_WINDOW TASK_SIZE -#define STACK_TOP TASK_SIZE -#define STACK_TOP_MAX STACK_TOP - #define INIT_THREAD { \ .sp0 = TOP_OF_INIT_STACK, \ .sysenter_cs = __KERNEL_CS, \ @@ -802,44 +791,6 @@ static inline void spin_lock_prefetch(const void *x) #define KSTK_ESP(task) (task_pt_regs(task)->sp) #else -/* - * User space process size. This is the first address outside the user range. - * There are a few constraints that determine this: - * - * On Intel CPUs, if a SYSCALL instruction is at the highest canonical - * address, then that syscall will enter the kernel with a - * non-canonical return address, and SYSRET will explode dangerously. - * We avoid this particular problem by preventing anything executable - * from being mapped at the maximum canonical address. - * - * On AMD CPUs in the Ryzen family, there's a nasty bug in which the - * CPUs malfunction if they execute code from the highest canonical page. - * They'll speculate right off the end of the canonical space, and - * bad things happen. This is worked around in the same way as the - * Intel problem. - * - * With page table isolation enabled, we map the LDT in ... [stay tuned] - */ -#define TASK_SIZE_MAX ((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE) - -#define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE) - -/* This decides where the kernel will search for a free chunk of vm - * space during mmap's. - */ -#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ - 0xc0000000 : 0xFFFFe000) - -#define TASK_SIZE_LOW (test_thread_flag(TIF_ADDR32) ? \ - IA32_PAGE_OFFSET : DEFAULT_MAP_WINDOW) -#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \ - IA32_PAGE_OFFSET : TASK_SIZE_MAX) -#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \ - IA32_PAGE_OFFSET : TASK_SIZE_MAX) - -#define STACK_TOP TASK_SIZE_LOW -#define STACK_TOP_MAX TASK_SIZE_MAX - #define INIT_THREAD { \ .addr_limit = KERNEL_DS, \ } From a1d826d475aafe63775b910e86ccd1bcf1e5a6e1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 16:22:39 +0200 Subject: [PATCH 11/14] x86: make TASK_SIZE_MAX usable from assembly code For 64-bit the only thing missing was a strategic _AC, and for 32-bit we need to use __PAGE_OFFSET instead of PAGE_OFFSET in the TASK_SIZE definition to escape the explicit unsigned long cast. This just works because __PAGE_OFFSET is defined using _AC itself and thus never needs the cast anyway. Signed-off-by: Christoph Hellwig Reviewed-by: Kees Cook Signed-off-by: Al Viro --- arch/x86/include/asm/page_32_types.h | 4 ++-- arch/x86/include/asm/page_64_types.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index 26236925fb2c..f462895a33e4 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h @@ -44,8 +44,8 @@ /* * User space process size: 3GB (default). 
*/ -#define IA32_PAGE_OFFSET PAGE_OFFSET -#define TASK_SIZE PAGE_OFFSET +#define IA32_PAGE_OFFSET __PAGE_OFFSET +#define TASK_SIZE __PAGE_OFFSET #define TASK_SIZE_LOW TASK_SIZE #define TASK_SIZE_MAX TASK_SIZE #define DEFAULT_MAP_WINDOW TASK_SIZE diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 996595c9897e..838515daf87b 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -76,7 +76,7 @@ * * With page table isolation enabled, we map the LDT in ... [stay tuned] */ -#define TASK_SIZE_MAX ((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE) +#define TASK_SIZE_MAX ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE) #define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE) From 47058bb54b57962b3958a936ddbc59355e4c5504 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 16:22:40 +0200 Subject: [PATCH 12/14] x86: remove address space overrides using set_fs() Stop providing the possibility to override the address space using set_fs() now that there is no need for that any more. To properly handle the TASK_SIZE_MAX checking for 4 vs 5-level page tables on x86 a new alternative is introduced, which just like the one in entry_64.S has to use the hardcoded virtual address bits to escape the fact that TASK_SIZE_MAX isn't actually a constant when 5-level page tables are enabled. Signed-off-by: Christoph Hellwig Reviewed-by: Kees Cook Signed-off-by: Al Viro --- arch/x86/Kconfig | 1 - arch/x86/ia32/ia32_aout.c | 1 - arch/x86/include/asm/processor.h | 11 +------ arch/x86/include/asm/thread_info.h | 2 -- arch/x86/include/asm/uaccess.h | 26 +---------------- arch/x86/kernel/asm-offsets.c | 3 -- arch/x86/lib/getuser.S | 47 +++++++++++++++--------------- arch/x86/lib/putuser.S | 25 ++++++++-------- 8 files changed, 39 insertions(+), 77 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f85c13355732..7101ac64bb20 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -237,7 +237,6 @@ config X86 select HAVE_ARCH_KCSAN if X86_64 select X86_FEATURE_NAMES if PROC_FS select PROC_PID_ARCH_STATUS if PROC_FS - select SET_FS imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI config INSTRUCTION_DECODER diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index ca8a657edf59..a09fc37ead9d 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -239,7 +239,6 @@ beyond_if: (regs)->ss = __USER32_DS; regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0; - set_fs(USER_DS); return 0; } diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 1618eeb08361..189573d95c3a 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -482,10 +482,6 @@ extern unsigned int fpu_user_xstate_size; struct perf_event; -typedef struct { - unsigned long seg; -} mm_segment_t; - struct thread_struct { /* Cached TLS descriptors: */ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; @@ -538,8 +534,6 @@ struct thread_struct { */ unsigned long iopl_emul; - mm_segment_t addr_limit; - unsigned int sig_on_uaccess_err:1; /* Floating point and extended processor state */ @@ -785,15 +779,12 @@ static inline void spin_lock_prefetch(const void *x) #define INIT_THREAD { \ .sp0 = TOP_OF_INIT_STACK, \ .sysenter_cs = __KERNEL_CS, \ - .addr_limit = KERNEL_DS, \ } #define KSTK_ESP(task) (task_pt_regs(task)->sp) #else -#define INIT_THREAD { \ - .addr_limit = KERNEL_DS, \ -} +#define INIT_THREAD { } extern unsigned long 
KSTK_ESP(struct task_struct *task); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 267701ae3d86..44733a4bfc42 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -102,7 +102,6 @@ struct thread_info { #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */ #define TIF_X32 30 /* 32-bit native x86-64 binary */ -#define TIF_FSCHECK 31 /* Check FS is USER_DS on return */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) @@ -131,7 +130,6 @@ struct thread_info { #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_ADDR32 (1 << TIF_ADDR32) #define _TIF_X32 (1 << TIF_X32) -#define _TIF_FSCHECK (1 << TIF_FSCHECK) /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW_BASE \ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index ecefaffd15d4..a4ceda0510ea 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -12,30 +12,6 @@ #include #include -/* - * The fs value determines whether argument validity checking should be - * performed or not. If get_fs() == USER_DS, checking is performed, with - * get_fs() == KERNEL_DS, checking is bypassed. - * - * For historical reasons, these macros are grossly misnamed. - */ - -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) - -#define KERNEL_DS MAKE_MM_SEG(-1UL) -#define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX) - -#define get_fs() (current->thread.addr_limit) -static inline void set_fs(mm_segment_t fs) -{ - current->thread.addr_limit = fs; - /* On user-mode return, check fs is correct */ - set_thread_flag(TIF_FSCHECK); -} - -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) -#define user_addr_max() (current->thread.addr_limit.seg) - /* * Test whether a block of memory is a valid user space address. * Returns 0 if the range is valid, nonzero otherwise. 
@@ -93,7 +69,7 @@ static inline bool pagefault_disabled(void); #define access_ok(addr, size) \ ({ \ WARN_ON_IN_IRQ(); \ - likely(!__range_not_ok(addr, size, user_addr_max())); \ + likely(!__range_not_ok(addr, size, TASK_SIZE_MAX)); \ }) /* diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 3ca07ad552ae..70b7154f4bdd 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -37,9 +37,6 @@ static void __used common(void) OFFSET(TASK_stack_canary, task_struct, stack_canary); #endif - BLANK(); - OFFSET(TASK_addr_limit, task_struct, thread.addr_limit); - BLANK(); OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index c8a85b512796..2f052bc96866 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -35,10 +35,19 @@ #include #include +#ifdef CONFIG_X86_5LEVEL +#define LOAD_TASK_SIZE_MINUS_N(n) \ + ALTERNATIVE __stringify(mov $((1 << 47) - 4096 - (n)),%rdx), \ + __stringify(mov $((1 << 56) - 4096 - (n)),%rdx), X86_FEATURE_LA57 +#else +#define LOAD_TASK_SIZE_MINUS_N(n) \ + mov $(TASK_SIZE_MAX - (n)),%_ASM_DX +#endif + .text SYM_FUNC_START(__get_user_1) - mov PER_CPU_VAR(current_task), %_ASM_DX - cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX + LOAD_TASK_SIZE_MINUS_N(0) + cmp %_ASM_DX,%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX @@ -51,15 +60,13 @@ SYM_FUNC_END(__get_user_1) EXPORT_SYMBOL(__get_user_1) SYM_FUNC_START(__get_user_2) - add $1,%_ASM_AX - jc bad_get_user - mov PER_CPU_VAR(current_task), %_ASM_DX - cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX + LOAD_TASK_SIZE_MINUS_N(1) + cmp %_ASM_DX,%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC -2: movzwl -1(%_ASM_AX),%edx +2: movzwl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC ret @@ -67,15 +74,13 @@ SYM_FUNC_END(__get_user_2) EXPORT_SYMBOL(__get_user_2) SYM_FUNC_START(__get_user_4) - add $3,%_ASM_AX - jc bad_get_user - mov PER_CPU_VAR(current_task), %_ASM_DX - cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX + LOAD_TASK_SIZE_MINUS_N(3) + cmp %_ASM_DX,%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC -3: movl -3(%_ASM_AX),%edx +3: movl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC ret @@ -84,29 +89,25 @@ EXPORT_SYMBOL(__get_user_4) SYM_FUNC_START(__get_user_8) #ifdef CONFIG_X86_64 - add $7,%_ASM_AX - jc bad_get_user - mov PER_CPU_VAR(current_task), %_ASM_DX - cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX + LOAD_TASK_SIZE_MINUS_N(7) + cmp %_ASM_DX,%_ASM_AX jae bad_get_user sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC -4: movq -7(%_ASM_AX),%rdx +4: movq (%_ASM_AX),%rdx xor %eax,%eax ASM_CLAC ret #else - add $7,%_ASM_AX - jc bad_get_user_8 - mov PER_CPU_VAR(current_task), %_ASM_DX - cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX + LOAD_TASK_SIZE_MINUS_N(7) + cmp %_ASM_DX,%_ASM_AX jae bad_get_user_8 sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ and %_ASM_DX, %_ASM_AX ASM_STAC -4: movl -7(%_ASM_AX),%edx -5: movl -3(%_ASM_AX),%ecx +4: movl (%_ASM_AX),%edx +5: movl 4(%_ASM_AX),%ecx xor %eax,%eax ASM_CLAC ret diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index 7c7c92db8497..358239d77dff 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -31,12 +31,19 @@ * as they get called from within inline assembly. 
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 7c7c92db8497..358239d77dff 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -31,12 +31,19 @@
  * as they get called from within inline assembly.
  */
 
-#define ENTER	mov PER_CPU_VAR(current_task), %_ASM_BX
+#ifdef CONFIG_X86_5LEVEL
+#define LOAD_TASK_SIZE_MINUS_N(n) \
+	ALTERNATIVE __stringify(mov $((1 << 47) - 4096 - (n)),%rbx), \
+		    __stringify(mov $((1 << 56) - 4096 - (n)),%rbx), X86_FEATURE_LA57
+#else
+#define LOAD_TASK_SIZE_MINUS_N(n) \
+	mov $(TASK_SIZE_MAX - (n)),%_ASM_BX
+#endif
 
 .text
 SYM_FUNC_START(__put_user_1)
-	ENTER
-	cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX
+	LOAD_TASK_SIZE_MINUS_N(0)
+	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 	ASM_STAC
 1:	movb %al,(%_ASM_CX)
@@ -47,9 +54,7 @@ SYM_FUNC_END(__put_user_1)
 EXPORT_SYMBOL(__put_user_1)
 
 SYM_FUNC_START(__put_user_2)
-	ENTER
-	mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
-	sub $1,%_ASM_BX
+	LOAD_TASK_SIZE_MINUS_N(1)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 	ASM_STAC
@@ -61,9 +66,7 @@ SYM_FUNC_END(__put_user_2)
 EXPORT_SYMBOL(__put_user_2)
 
 SYM_FUNC_START(__put_user_4)
-	ENTER
-	mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
-	sub $3,%_ASM_BX
+	LOAD_TASK_SIZE_MINUS_N(3)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 	ASM_STAC
@@ -75,9 +78,7 @@ SYM_FUNC_END(__put_user_4)
 EXPORT_SYMBOL(__put_user_4)
 
 SYM_FUNC_START(__put_user_8)
-	ENTER
-	mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
-	sub $7,%_ASM_BX
+	LOAD_TASK_SIZE_MINUS_N(7)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 	ASM_STAC

From c33165253492b91337f88c84b11733a4a2d55b60 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 3 Sep 2020 16:22:41 +0200
Subject: [PATCH 13/14] powerpc: use non-set_fs based maccess routines

Provide __get_kernel_nofault and __put_kernel_nofault routines to
implement the maccess routines without messing with set_fs and without
opening up access to user space.

Signed-off-by: Christoph Hellwig
Signed-off-by: Al Viro
---
 arch/powerpc/include/asm/uaccess.h | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 00699903f1ef..7fe3531ad36a 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -623,4 +623,20 @@ do {	\
 		__put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\
 } while (0)
 
+#define HAVE_GET_KERNEL_NOFAULT
+
+#define __get_kernel_nofault(dst, src, type, err_label)	\
+do {	\
+	int __kr_err;	\
+	\
+	__get_user_size_allowed(*((type *)(dst)), (__force type __user *)(src),\
+			sizeof(type), __kr_err);	\
+	if (unlikely(__kr_err))	\
+		goto err_label;	\
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+	__put_user_size_goto(*((type *)(src)),	\
+		(__force type __user *)(dst), sizeof(type), err_label)
+
 #endif /* _ARCH_POWERPC_UACCESS_H */
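For context on how the two new helpers are meant to be consumed (a sketch only,
not part of the patch; the function name is made up and the generic maccess
callers may differ in detail), a caller wraps the access in
pagefault_disable()/pagefault_enable() and jumps to the supplied label on a fault:

/* Illustrative only: typical consumption of __get_kernel_nofault(). */
static long example_read_kernel_long(long *dst, const long *src)
{
	long ret = 0;

	pagefault_disable();		/* a fault here must not sleep */
	__get_kernel_nofault(dst, src, long, Efault);
out:
	pagefault_enable();
	return ret;
Efault:
	ret = -EFAULT;
	goto out;
}
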
From 5ae4998b5d6fc703a16c9fa935fb7d335843bf22 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 3 Sep 2020 16:22:42 +0200
Subject: [PATCH 14/14] powerpc: remove address space overrides using set_fs()

Stop providing the possibility to override the address space using
set_fs() now that there is no need for that any more.

Signed-off-by: Christoph Hellwig
Signed-off-by: Al Viro
---
 arch/powerpc/Kconfig                   |  1 -
 arch/powerpc/include/asm/processor.h   |  7 ----
 arch/powerpc/include/asm/thread_info.h |  5 +--
 arch/powerpc/include/asm/uaccess.h     | 51 +++----------------------
 arch/powerpc/kernel/signal.c           |  3 --
 arch/powerpc/lib/sstep.c               |  6 +--
 6 files changed, 9 insertions(+), 64 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3f09d6fdf894..1f48bbfb3ce9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -249,7 +249,6 @@ config PPC
 	select PCI_SYSCALL if PCI
 	select PPC_DAWR if PPC64
 	select RTC_LIB
-	select SET_FS
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
 	select THREAD_INFO_IN_TASK
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index ed0d633ab5aa..f01e4d650c52 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -83,10 +83,6 @@ struct task_struct;
 void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
 void release_thread(struct task_struct *);
 
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
 #define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
 #define TS_CKFPR(i) ckfp_state.fpr[i][TS_FPROFFSET]
 
@@ -148,7 +144,6 @@ struct thread_struct {
 	unsigned long ksp_vsid;
 #endif
 	struct pt_regs *regs;	/* Pointer to saved register state */
-	mm_segment_t addr_limit; /* for get_fs() validation */
 #ifdef CONFIG_BOOKE
 	/* BookE base exception scratch space; align on cacheline */
 	unsigned long normsave[8] ____cacheline_aligned;
@@ -295,7 +290,6 @@ struct thread_struct {
 #define INIT_THREAD { \
 	.ksp = INIT_SP, \
 	.ksp_limit = INIT_SP_LIMIT, \
-	.addr_limit = KERNEL_DS, \
 	.pgdir = swapper_pg_dir, \
 	.fpexc_mode = MSR_FE0 | MSR_FE1, \
 	SPEFSCR_INIT \
@@ -303,7 +297,6 @@ struct thread_struct {
 #else
 #define INIT_THREAD { \
 	.ksp = INIT_SP, \
-	.addr_limit = KERNEL_DS, \
 	.fpexc_mode = 0, \
 }
 #endif
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index ca6c97025704..46a210b03d2b 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -90,7 +90,6 @@ void arch_setup_new_exec(void);
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
 #define TIF_SIGPENDING		1	/* signal pending */
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
-#define TIF_FSCHECK		3	/* Check FS is USER_DS on return */
 #define TIF_SYSCALL_EMU		4	/* syscall emulation active */
 #define TIF_RESTORE_TM		5	/* need to restore TM FP/VEC/VSX */
 #define TIF_PATCH_PENDING	6	/* pending live patching update */
@@ -130,7 +129,6 @@ void arch_setup_new_exec(void);
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
 #include
 #include
 
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not.  If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- *
- * The fs/ds values are now the highest legal address in the "segment".
- * This simplifies the checking in the routines below.
- */
-
-#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
-
-#define KERNEL_DS	MAKE_MM_SEG(~0UL)
 #ifdef __powerpc64__
 /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
-#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
+#define TASK_SIZE_MAX		TASK_SIZE_USER64
 #else
-#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
+#define TASK_SIZE_MAX		TASK_SIZE
 #endif
 
-#define get_fs()	(current->thread.addr_limit)
-
-static inline void set_fs(mm_segment_t fs)
+static inline bool __access_ok(unsigned long addr, unsigned long size)
 {
-	current->thread.addr_limit = fs;
-	/* On user-mode return check addr_limit (fs) is correct */
-	set_thread_flag(TIF_FSCHECK);
+	return addr < TASK_SIZE_MAX && size <= TASK_SIZE_MAX - addr;
 }
 
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-#define user_addr_max()	(get_fs().seg)
-
-#ifdef __powerpc64__
-/*
- * This check is sufficient because there is a large enough
- * gap between user addresses and the kernel addresses
- */
-#define __access_ok(addr, size, segment)	\
-	(((addr) <= (segment).seg) && ((size) <= (segment).seg))
-
-#else
-
-static inline int __access_ok(unsigned long addr, unsigned long size,
-			mm_segment_t seg)
-{
-	if (addr > seg.seg)
-		return 0;
-	return (size == 0 || size - 1 <= seg.seg - addr);
-}
-
-#endif
-
 #define access_ok(addr, size)	\
 	(__chk_user_ptr(addr),	\
-	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
+	 __access_ok((unsigned long)(addr), (size)))
 
 /*
  * These are the main single-value transfer routines.  They automatically
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index d15a98c758b8..df547d8e31e4 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -312,9 +312,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 {
 	user_exit();
 
-	/* Check valid addr_limit, TIF check is done there */
-	addr_limit_user_check();
-
 	if (thread_info_flags & _TIF_UPROBE)
 		uprobe_notify_resume(regs);
 
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index caee8cc77e19..8342188ea1ac 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -108,11 +108,11 @@ static nokprobe_inline long address_ok(struct pt_regs *regs,
 {
 	if (!user_mode(regs))
 		return 1;
-	if (__access_ok(ea, nb, USER_DS))
+	if (__access_ok(ea, nb))
 		return 1;
-	if (__access_ok(ea, 1, USER_DS))
+	if (__access_ok(ea, 1))
 		/* Access overlaps the end of the user region */
-		regs->dar = USER_DS.seg;
+		regs->dar = TASK_SIZE_MAX - 1;
 	else
 		regs->dar = ea;
 	return 0;
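One property of the new powerpc __access_ok() worth noting: checking
addr < TASK_SIZE_MAX && size <= TASK_SIZE_MAX - addr cannot wrap, whereas a naive
addr + size <= TASK_SIZE_MAX test can overflow and accept a huge size. A small
self-contained sketch (illustrative limit value and names, not from the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_TASK_SIZE_MAX 0x0000400000000000ULL	/* illustrative limit */

/* Mirrors the shape of the new __access_ok(): no overflow in addr + size. */
static bool example_access_ok(uint64_t addr, uint64_t size)
{
	return addr < EXAMPLE_TASK_SIZE_MAX &&
	       size <= EXAMPLE_TASK_SIZE_MAX - addr;
}

int main(void)
{
	/* a huge size would wrap a naive addr + size check, but is rejected here */
	printf("%d\n", example_access_ok(0x1000, UINT64_MAX - 0x800));	/* 0 */
	printf("%d\n", example_access_ok(0x1000, 4096));		/* 1 */
	return 0;
}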