From 0b8c0ec7eedcd8f9f1a1f238d87f9b512b09e71a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 2 Dec 2019 08:50:00 -0700 Subject: [PATCH 01/33] io_uring: use current task creds instead of allocating a new one syzbot reports: kasan: CONFIG_KASAN_INLINE enabled kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] PREEMPT SMP KASAN CPU: 0 PID: 9217 Comm: io_uring-sq Not tainted 5.4.0-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 RIP: 0010:creds_are_invalid kernel/cred.c:792 [inline] RIP: 0010:__validate_creds include/linux/cred.h:187 [inline] RIP: 0010:override_creds+0x9f/0x170 kernel/cred.c:550 Code: ac 25 00 81 fb 64 65 73 43 0f 85 a3 37 00 00 e8 17 ab 25 00 49 8d 7c 24 10 48 b8 00 00 00 00 00 fc ff df 48 89 fa 48 c1 ea 03 <0f> b6 04 02 84 c0 74 08 3c 03 0f 8e 96 00 00 00 41 8b 5c 24 10 bf RSP: 0018:ffff88809c45fda0 EFLAGS: 00010202 RAX: dffffc0000000000 RBX: 0000000043736564 RCX: ffffffff814f3318 RDX: 0000000000000002 RSI: ffffffff814f3329 RDI: 0000000000000010 RBP: ffff88809c45fdb8 R08: ffff8880a3aac240 R09: ffffed1014755849 R10: ffffed1014755848 R11: ffff8880a3aac247 R12: 0000000000000000 R13: ffff888098ab1600 R14: 0000000000000000 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff8880ae800000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007ffd51c40664 CR3: 0000000092641000 CR4: 00000000001406f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: io_sq_thread+0x1c7/0xa20 fs/io_uring.c:3274 kthread+0x361/0x430 kernel/kthread.c:255 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352 Modules linked in: ---[ end trace f2e1a4307fbe2245 ]--- RIP: 0010:creds_are_invalid kernel/cred.c:792 [inline] RIP: 0010:__validate_creds include/linux/cred.h:187 [inline] RIP: 0010:override_creds+0x9f/0x170 kernel/cred.c:550 Code: ac 25 00 81 fb 64 65 73 43 0f 85 a3 37 00 00 e8 17 ab 25 00 49 8d 7c 24 10 48 b8 00 00 00 00 00 fc ff df 48 89 fa 48 c1 ea 03 <0f> b6 04 02 84 c0 74 08 3c 03 0f 8e 96 00 00 00 41 8b 5c 24 10 bf RSP: 0018:ffff88809c45fda0 EFLAGS: 00010202 RAX: dffffc0000000000 RBX: 0000000043736564 RCX: ffffffff814f3318 RDX: 0000000000000002 RSI: ffffffff814f3329 RDI: 0000000000000010 RBP: ffff88809c45fdb8 R08: ffff8880a3aac240 R09: ffffed1014755849 R10: ffffed1014755848 R11: ffff8880a3aac247 R12: 0000000000000000 R13: ffff888098ab1600 R14: 0000000000000000 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff8880ae800000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007ffd51c40664 CR3: 0000000092641000 CR4: 00000000001406f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 which is caused by slab fault injection triggering a failure in prepare_creds(). We don't actually need to create a copy of the creds as we're not modifying it, we just need a reference on the current task creds. This avoids the failure case as well, and propagates the const throughout the stack. 
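For reference, a minimal sketch of the difference between the two approaches (not part of the patch; the worker_creds variable and function names are illustrative):

#include <linux/cred.h>
#include <linux/errno.h>

static const struct cred *worker_creds;

static int creds_via_copy(void)
{
        /* prepare_creds() kmallocs a writable copy and can fail -
         * this is the path syzbot's fault injection trips */
        struct cred *copy = prepare_creds();

        if (!copy)
                return -ENOMEM;
        worker_creds = copy;
        return 0;
}

static void creds_via_ref(void)
{
        /* get_current_cred() just takes a reference on current->cred
         * (dropped later with put_cred()); it cannot fail */
        worker_creds = get_current_cred();
}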
Fixes: 181e448d8709 ("io_uring: async workers should inherit the user creds") Reported-by: syzbot+5320383e16029ba057ff@syzkaller.appspotmail.com Signed-off-by: Jens Axboe --- fs/io-wq.c | 2 +- fs/io-wq.h | 2 +- fs/io_uring.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/io-wq.c b/fs/io-wq.c index 91b85df0861e..74b40506c5d9 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -111,7 +111,7 @@ struct io_wq { struct task_struct *manager; struct user_struct *user; - struct cred *creds; + const struct cred *creds; struct mm_struct *mm; refcount_t refs; struct completion done; diff --git a/fs/io-wq.h b/fs/io-wq.h index 600e0158cba7..dd0af0d7376c 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -87,7 +87,7 @@ typedef void (put_work_fn)(struct io_wq_work *); struct io_wq_data { struct mm_struct *mm; struct user_struct *user; - struct cred *creds; + const struct cred *creds; get_work_fn *get_work; put_work_fn *put_work; diff --git a/fs/io_uring.c b/fs/io_uring.c index ec53aa7cdc94..5cab7a047317 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -238,7 +238,7 @@ struct io_ring_ctx { struct user_struct *user; - struct cred *creds; + const struct cred *creds; /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */ struct completion *completions; @@ -4759,7 +4759,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) ctx->compat = in_compat_syscall(); ctx->account_mem = account_mem; ctx->user = user; - ctx->creds = prepare_creds(); + ctx->creds = get_current_cred(); ret = io_allocate_scq_urings(ctx, p); if (ret) From 441cdbd5449b4923cd413d3ba748124f91388be9 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 2 Dec 2019 18:49:10 -0700 Subject: [PATCH 02/33] io_uring: transform send/recvmsg() -ERESTARTSYS to -EINTR We should never return -ERESTARTSYS to userspace, transform it into -EINTR. Cc: stable@vger.kernel.org # v5.3+ Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/io_uring.c b/fs/io_uring.c index 5cab7a047317..a91743e1fa2c 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1917,6 +1917,8 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, ret = fn(sock, msg, flags); if (force_nonblock && ret == -EAGAIN) return ret; + if (ret == -ERESTARTSYS) + ret = -EINTR; } io_cqring_add_event(req, ret); From 490547ca2df66b8413bce97cb651630f2c531487 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 2 Dec 2019 10:21:34 -0800 Subject: [PATCH 03/33] block: don't send uevent for empty disk when not invalidating Commit 6917d0689993 ("block: merge invalidate_partitions into rescan_partitions") caused a regression where systemd-udevd spins forever using max CPU starting at boot time. It's caused by a behavior change where a KOBJ_CHANGE uevent is now sent in a case where previously it wasn't. Restore the old behavior. Fixes: 6917d0689993 ("block: merge invalidate_partitions into rescan_partitions") Reviewed-by: Christoph Hellwig Signed-off-by: Eric Biggers Signed-off-by: Jens Axboe --- fs/block_dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/block_dev.c b/fs/block_dev.c index ee63c2732fa2..69bf2fb6f7cd 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1531,7 +1531,7 @@ rescan: ret = blk_add_partitions(disk, bdev); if (ret == -EAGAIN) goto rescan; - } else { + } else if (invalidate) { /* * Tell userspace that the media / partition table may have * changed. 
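To make the restored behavior of the last hunk concrete, here is a sketch of the rescan tail after the fix (condensed; the get_capacity() guard is an assumption consistent with the "empty disk" in the subject line):

        if (get_capacity(disk)) {
                ret = blk_add_partitions(disk, bdev);
                if (ret == -EAGAIN)
                        goto rescan;
        } else if (invalidate) {
                /* media / partition table may have changed */
                kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
        }

The uevent is now sent only when the caller asked for invalidation, which is the behavior systemd-udevd depended on.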
From 1a6b74fc87024db59d41cd7346bd437f20fb3e2d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 2 Dec 2019 10:33:15 -0700 Subject: [PATCH 04/33] io_uring: add general async offload context Right now we just copy the sqe for async offload, but we want to store more context across an async punt. In preparation for doing so, put the sqe copy inside a structure that we can expand. With this pointer added, we can get rid of REQ_F_FREE_SQE, as that is now indicated by whether req->io is NULL or not. No functional changes in this patch. Signed-off-by: Jens Axboe --- fs/io_uring.c | 56 +++++++++++++++++++++++++++++---------------------- 1 file changed, 32 insertions(+), 24 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index a91743e1fa2c..bbbd9f664b1e 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -308,6 +308,10 @@ struct io_timeout { struct io_timeout_data *data; }; +struct io_async_ctx { + struct io_uring_sqe sqe; +}; + /* * NOTE! Each of the iocb union members has the file pointer * as the first entry in their struct definition. So you can @@ -323,6 +327,7 @@ struct io_kiocb { }; const struct io_uring_sqe *sqe; + struct io_async_ctx *io; struct file *ring_file; int ring_fd; bool has_user; @@ -353,7 +358,6 @@ struct io_kiocb { #define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */ #define REQ_F_INFLIGHT 16384 /* on inflight list */ #define REQ_F_COMP_LOCKED 32768 /* completion under lock */ -#define REQ_F_FREE_SQE 65536 /* free sqe if not async queued */ u64 user_data; u32 result; u32 sequence; @@ -806,6 +810,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, } got_it: + req->io = NULL; req->ring_file = NULL; req->file = NULL; req->ctx = ctx; @@ -836,8 +841,8 @@ static void __io_free_req(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - if (req->flags & REQ_F_FREE_SQE) - kfree(req->sqe); + if (req->io) + kfree(req->io); if (req->file && !(req->flags & REQ_F_FIXED_FILE)) fput(req->file); if (req->flags & REQ_F_INFLIGHT) { @@ -1079,9 +1084,9 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, * completions for those, only batch free for fixed * file and non-linked commands. */ - if (((req->flags & - (REQ_F_FIXED_FILE|REQ_F_LINK|REQ_F_FREE_SQE)) == - REQ_F_FIXED_FILE) && !io_is_fallback_req(req)) { + if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) == + REQ_F_FIXED_FILE) && !io_is_fallback_req(req) && + !req->io) { reqs[to_free++] = req; if (to_free == ARRAY_SIZE(reqs)) io_free_req_many(ctx, reqs, &to_free); @@ -2259,7 +2264,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (!poll->wait) return -ENOMEM; - req->sqe = NULL; + req->io = NULL; INIT_IO_WORK(&req->work, io_poll_complete_work); events = READ_ONCE(sqe->poll_events); poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; @@ -2602,27 +2607,27 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe, static int io_req_defer(struct io_kiocb *req) { - struct io_uring_sqe *sqe_copy; struct io_ring_ctx *ctx = req->ctx; + struct io_async_ctx *io; /* Still need defer if there is pending req in defer list. 
*/ if (!req_need_defer(req) && list_empty(&ctx->defer_list)) return 0; - sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL); - if (!sqe_copy) + io = kmalloc(sizeof(*io), GFP_KERNEL); + if (!io) return -EAGAIN; spin_lock_irq(&ctx->completion_lock); if (!req_need_defer(req) && list_empty(&ctx->defer_list)) { spin_unlock_irq(&ctx->completion_lock); - kfree(sqe_copy); + kfree(io); return 0; } - memcpy(sqe_copy, req->sqe, sizeof(*sqe_copy)); - req->flags |= REQ_F_FREE_SQE; - req->sqe = sqe_copy; + memcpy(&io->sqe, req->sqe, sizeof(io->sqe)); + req->sqe = &io->sqe; + req->io = io; trace_io_uring_defer(ctx, req, req->user_data); list_add_tail(&req->list, &ctx->defer_list); @@ -2955,14 +2960,16 @@ static void __io_queue_sqe(struct io_kiocb *req) */ if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) || (req->flags & REQ_F_MUST_PUNT))) { - struct io_uring_sqe *sqe_copy; + struct io_async_ctx *io; - sqe_copy = kmemdup(req->sqe, sizeof(*sqe_copy), GFP_KERNEL); - if (!sqe_copy) + io = kmalloc(sizeof(*io), GFP_KERNEL); + if (!io) goto err; - req->sqe = sqe_copy; - req->flags |= REQ_F_FREE_SQE; + memcpy(&io->sqe, req->sqe, sizeof(io->sqe)); + + req->sqe = &io->sqe; + req->io = io; if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) { ret = io_grab_files(req); @@ -3063,7 +3070,7 @@ err_req: */ if (*link) { struct io_kiocb *prev = *link; - struct io_uring_sqe *sqe_copy; + struct io_async_ctx *io; if (req->sqe->flags & IOSQE_IO_DRAIN) (*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN; @@ -3079,14 +3086,15 @@ err_req: } } - sqe_copy = kmemdup(req->sqe, sizeof(*sqe_copy), GFP_KERNEL); - if (!sqe_copy) { + io = kmalloc(sizeof(*io), GFP_KERNEL); + if (!io) { ret = -EAGAIN; goto err_req; } - req->sqe = sqe_copy; - req->flags |= REQ_F_FREE_SQE; + memcpy(&io->sqe, req->sqe, sizeof(io->sqe)); + req->sqe = &io->sqe; + req->io = io; trace_io_uring_link(ctx, req, prev); list_add_tail(&req->list, &prev->link_list); } else if (req->sqe->flags & IOSQE_IO_LINK) { From f67676d160c6ee2ed82917fadfed6d29cab8237c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 2 Dec 2019 11:03:47 -0700 Subject: [PATCH 05/33] io_uring: ensure async punted read/write requests copy iovec MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we don't copy the iovecs when we punt to async context. This can be problematic for applications that store the iovec on the stack, as they often assume that it's safe to let the iovec go out of scope as soon as IO submission has been called. This isn't always safe, as we will re-copy the iovec once we're in async context. Make this 100% safe by copying the iovec just once. With this change, applications may safely store the iovec on the stack for all cases. 
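From the application side, this is the pattern that becomes safe; a hedged userspace sketch using liburing helpers (error handling trimmed):

#include <liburing.h>
#include <sys/uio.h>

static int queue_read(struct io_uring *ring, int fd, void *buf, size_t len)
{
        /* on-stack iovec; the buffer itself must still outlive the I/O */
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
                return -EBUSY;
        io_uring_prep_readv(sqe, fd, &iov, 1, 0);
        /* after submit returns, iov may go out of scope even if the
         * kernel punted the request to async context */
        return io_uring_submit(ring);
}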
Reported-by: 李通洲 Signed-off-by: Jens Axboe --- fs/io_uring.c | 247 +++++++++++++++++++++++++++++++++++++------------- 1 file changed, 183 insertions(+), 64 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index bbbd9f664b1e..1689aea55527 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -308,8 +308,18 @@ struct io_timeout { struct io_timeout_data *data; }; +struct io_async_rw { + struct iovec fast_iov[UIO_FASTIOV]; + struct iovec *iov; + ssize_t nr_segs; + ssize_t size; +}; + struct io_async_ctx { struct io_uring_sqe sqe; + union { + struct io_async_rw rw; + }; }; /* @@ -1415,15 +1425,6 @@ static int io_prep_rw(struct io_kiocb *req, bool force_nonblock) if (S_ISREG(file_inode(req->file)->i_mode)) req->flags |= REQ_F_ISREG; - /* - * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so - * we know to async punt it even if it was opened O_NONBLOCK - */ - if (force_nonblock && !io_file_supports_async(req->file)) { - req->flags |= REQ_F_MUST_PUNT; - return -EAGAIN; - } - kiocb->ki_pos = READ_ONCE(sqe->off); kiocb->ki_flags = iocb_flags(kiocb->ki_filp); kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp)); @@ -1592,6 +1593,16 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req, return io_import_fixed(req->ctx, rw, sqe, iter); } + if (req->io) { + struct io_async_rw *iorw = &req->io->rw; + + *iovec = iorw->iov; + iov_iter_init(iter, rw, *iovec, iorw->nr_segs, iorw->size); + if (iorw->iov == iorw->fast_iov) + *iovec = NULL; + return iorw->size; + } + if (!req->has_user) return -EFAULT; @@ -1662,6 +1673,50 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, return ret; } +static void io_req_map_io(struct io_kiocb *req, ssize_t io_size, + struct iovec *iovec, struct iovec *fast_iov, + struct iov_iter *iter) +{ + req->io->rw.nr_segs = iter->nr_segs; + req->io->rw.size = io_size; + req->io->rw.iov = iovec; + if (!req->io->rw.iov) { + req->io->rw.iov = req->io->rw.fast_iov; + memcpy(req->io->rw.iov, fast_iov, + sizeof(struct iovec) * iter->nr_segs); + } +} + +static int io_setup_async_io(struct io_kiocb *req, ssize_t io_size, + struct iovec *iovec, struct iovec *fast_iov, + struct iov_iter *iter) +{ + req->io = kmalloc(sizeof(*req->io), GFP_KERNEL); + if (req->io) { + io_req_map_io(req, io_size, iovec, fast_iov, iter); + memcpy(&req->io->sqe, req->sqe, sizeof(req->io->sqe)); + req->sqe = &req->io->sqe; + return 0; + } + + return -ENOMEM; +} + +static int io_read_prep(struct io_kiocb *req, struct iovec **iovec, + struct iov_iter *iter, bool force_nonblock) +{ + ssize_t ret; + + ret = io_prep_rw(req, force_nonblock); + if (ret) + return ret; + + if (unlikely(!(req->file->f_mode & FMODE_READ))) + return -EBADF; + + return io_import_iovec(READ, req, iovec, iter); +} + static int io_read(struct io_kiocb *req, struct io_kiocb **nxt, bool force_nonblock) { @@ -1670,23 +1725,31 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt, struct iov_iter iter; struct file *file; size_t iov_count; - ssize_t read_size, ret; + ssize_t io_size, ret; - ret = io_prep_rw(req, force_nonblock); - if (ret) - return ret; - file = kiocb->ki_filp; + if (!req->io) { + ret = io_read_prep(req, &iovec, &iter, force_nonblock); + if (ret < 0) + return ret; + } else { + ret = io_import_iovec(READ, req, &iovec, &iter); + if (ret < 0) + return ret; + } - if (unlikely(!(file->f_mode & FMODE_READ))) - return -EBADF; - - ret = io_import_iovec(READ, req, &iovec, &iter); - if (ret < 0) - return ret; - - read_size = ret; + file = req->file; + io_size = ret; if 
(req->flags & REQ_F_LINK) - req->result = read_size; + req->result = io_size; + + /* + * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so + * we know to async punt it even if it was opened O_NONBLOCK + */ + if (force_nonblock && !io_file_supports_async(file)) { + req->flags |= REQ_F_MUST_PUNT; + goto copy_iov; + } iov_count = iov_iter_count(&iter); ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count); @@ -1708,18 +1771,40 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt, */ if (force_nonblock && !(req->flags & REQ_F_NOWAIT) && (req->flags & REQ_F_ISREG) && - ret2 > 0 && ret2 < read_size) + ret2 > 0 && ret2 < io_size) ret2 = -EAGAIN; /* Catch -EAGAIN return for forced non-blocking submission */ - if (!force_nonblock || ret2 != -EAGAIN) + if (!force_nonblock || ret2 != -EAGAIN) { kiocb_done(kiocb, ret2, nxt, req->in_async); - else - ret = -EAGAIN; + } else { +copy_iov: + ret = io_setup_async_io(req, io_size, iovec, + inline_vecs, &iter); + if (ret) + goto out_free; + return -EAGAIN; + } } +out_free: kfree(iovec); return ret; } +static int io_write_prep(struct io_kiocb *req, struct iovec **iovec, + struct iov_iter *iter, bool force_nonblock) +{ + ssize_t ret; + + ret = io_prep_rw(req, force_nonblock); + if (ret) + return ret; + + if (unlikely(!(req->file->f_mode & FMODE_WRITE))) + return -EBADF; + + return io_import_iovec(WRITE, req, iovec, iter); +} + static int io_write(struct io_kiocb *req, struct io_kiocb **nxt, bool force_nonblock) { @@ -1728,29 +1813,36 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt, struct iov_iter iter; struct file *file; size_t iov_count; - ssize_t ret; + ssize_t ret, io_size; - ret = io_prep_rw(req, force_nonblock); - if (ret) - return ret; + if (!req->io) { + ret = io_write_prep(req, &iovec, &iter, force_nonblock); + if (ret < 0) + return ret; + } else { + ret = io_import_iovec(WRITE, req, &iovec, &iter); + if (ret < 0) + return ret; + } file = kiocb->ki_filp; - if (unlikely(!(file->f_mode & FMODE_WRITE))) - return -EBADF; - - ret = io_import_iovec(WRITE, req, &iovec, &iter); - if (ret < 0) - return ret; - + io_size = ret; if (req->flags & REQ_F_LINK) - req->result = ret; + req->result = io_size; + + /* + * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so + * we know to async punt it even if it was opened O_NONBLOCK + */ + if (force_nonblock && !io_file_supports_async(req->file)) { + req->flags |= REQ_F_MUST_PUNT; + goto copy_iov; + } + + if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) + goto copy_iov; iov_count = iov_iter_count(&iter); - - ret = -EAGAIN; - if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) - goto out_free; - ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count); if (!ret) { ssize_t ret2; @@ -1774,10 +1866,16 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt, ret2 = call_write_iter(file, kiocb, &iter); else ret2 = loop_rw_iter(WRITE, file, kiocb, &iter); - if (!force_nonblock || ret2 != -EAGAIN) + if (!force_nonblock || ret2 != -EAGAIN) { kiocb_done(kiocb, ret2, nxt, req->in_async); - else - ret = -EAGAIN; + } else { +copy_iov: + ret = io_setup_async_io(req, io_size, iovec, + inline_vecs, &iter); + if (ret) + goto out_free; + return -EAGAIN; + } } out_free: kfree(iovec); @@ -2605,10 +2703,42 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe, return 0; } +static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io) +{ + struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; + struct 
iov_iter iter; + ssize_t ret; + + memcpy(&io->sqe, req->sqe, sizeof(io->sqe)); + req->sqe = &io->sqe; + + switch (io->sqe.opcode) { + case IORING_OP_READV: + case IORING_OP_READ_FIXED: + ret = io_read_prep(req, &iovec, &iter, true); + break; + case IORING_OP_WRITEV: + case IORING_OP_WRITE_FIXED: + ret = io_write_prep(req, &iovec, &iter, true); + break; + default: + req->io = io; + return 0; + } + + if (ret < 0) + return ret; + + req->io = io; + io_req_map_io(req, ret, iovec, inline_vecs, &iter); + return 0; +} + static int io_req_defer(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; struct io_async_ctx *io; + int ret; /* Still need defer if there is pending req in defer list. */ if (!req_need_defer(req) && list_empty(&ctx->defer_list)) @@ -2625,9 +2755,9 @@ static int io_req_defer(struct io_kiocb *req) return 0; } - memcpy(&io->sqe, req->sqe, sizeof(io->sqe)); - req->sqe = &io->sqe; - req->io = io; + ret = io_req_defer_prep(req, io); + if (ret < 0) + return ret; trace_io_uring_defer(ctx, req, req->user_data); list_add_tail(&req->list, &ctx->defer_list); @@ -2960,17 +3090,6 @@ static void __io_queue_sqe(struct io_kiocb *req) */ if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) || (req->flags & REQ_F_MUST_PUNT))) { - struct io_async_ctx *io; - - io = kmalloc(sizeof(*io), GFP_KERNEL); - if (!io) - goto err; - - memcpy(&io->sqe, req->sqe, sizeof(io->sqe)); - - req->sqe = &io->sqe; - req->io = io; - if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) { ret = io_grab_files(req); if (ret) @@ -3092,9 +3211,9 @@ err_req: goto err_req; } - memcpy(&io->sqe, req->sqe, sizeof(io->sqe)); - req->sqe = &io->sqe; - req->io = io; + ret = io_req_defer_prep(req, io); + if (ret) + goto err_req; trace_io_uring_link(ctx, req, prev); list_add_tail(&req->list, &prev->link_list); } else if (req->sqe->flags & IOSQE_IO_LINK) { From 03b1230ca12a12e045d83b0357792075bf94a1e0 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 2 Dec 2019 18:50:25 -0700 Subject: [PATCH 06/33] io_uring: ensure async punted sendmsg/recvmsg requests copy data Just like commit f67676d160c6 for read/write requests, this one ensures that the msghdr data is fully copied if we need to punt a recvmsg or sendmsg system call to async context. 
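Condensed to its essentials, the flow added below looks like this (a sketch reusing the identifiers this patch introduces; the wrapper function itself is illustrative, not literal kernel code):

static int sendmsg_punt_safe(struct io_kiocb *req, struct socket *sock,
                             unsigned int flags, bool force_nonblock)
{
        struct io_async_ctx io, *copy;
        struct msghdr *kmsg;
        int ret;

        if (req->io) {                  /* msghdr/iov copied earlier */
                kmsg = &req->io->msg.msg;
        } else {                        /* copy user memory up front */
                ret = io_sendmsg_prep(req, &io);
                if (ret)
                        return ret;
                kmsg = &io.msg.msg;
        }

        ret = __sys_sendmsg_sock(sock, kmsg, flags);
        if (force_nonblock && ret == -EAGAIN) {
                /* going async: persist the copied state in req->io so
                 * the user msghdr is never touched again */
                copy = kmalloc(sizeof(*copy), GFP_KERNEL);
                if (!copy)
                        return -ENOMEM;
                memcpy(&copy->msg, &io.msg, sizeof(copy->msg));
                req->io = copy;
        }
        return ret;
}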
Signed-off-by: Jens Axboe --- fs/io_uring.c | 165 ++++++++++++++++++++++++++++++++++------- include/linux/socket.h | 15 +++- net/socket.c | 60 +++++---------- 3 files changed, 166 insertions(+), 74 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 1689aea55527..2700382ebcc7 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -308,6 +308,13 @@ struct io_timeout { struct io_timeout_data *data; }; +struct io_async_msghdr { + struct iovec fast_iov[UIO_FASTIOV]; + struct iovec *iov; + struct sockaddr __user *uaddr; + struct msghdr msg; +}; + struct io_async_rw { struct iovec fast_iov[UIO_FASTIOV]; struct iovec *iov; @@ -319,6 +326,7 @@ struct io_async_ctx { struct io_uring_sqe sqe; union { struct io_async_rw rw; + struct io_async_msghdr msg; }; }; @@ -1991,12 +1999,104 @@ static int io_sync_file_range(struct io_kiocb *req, return 0; } -#if defined(CONFIG_NET) -static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, bool force_nonblock, - long (*fn)(struct socket *, struct user_msghdr __user *, - unsigned int)) +static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io) { +#if defined(CONFIG_NET) + const struct io_uring_sqe *sqe = req->sqe; + struct user_msghdr __user *msg; + unsigned flags; + + flags = READ_ONCE(sqe->msg_flags); + msg = (struct user_msghdr __user *)(unsigned long) READ_ONCE(sqe->addr); + return sendmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.iov); +#else + return 0; +#endif +} + +static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_kiocb **nxt, bool force_nonblock) +{ +#if defined(CONFIG_NET) + struct socket *sock; + int ret; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + + sock = sock_from_file(req->file, &ret); + if (sock) { + struct io_async_ctx io, *copy; + struct sockaddr_storage addr; + struct msghdr *kmsg; + unsigned flags; + + flags = READ_ONCE(sqe->msg_flags); + if (flags & MSG_DONTWAIT) + req->flags |= REQ_F_NOWAIT; + else if (force_nonblock) + flags |= MSG_DONTWAIT; + + if (req->io) { + kmsg = &req->io->msg.msg; + kmsg->msg_name = &addr; + } else { + kmsg = &io.msg.msg; + kmsg->msg_name = &addr; + io.msg.iov = io.msg.fast_iov; + ret = io_sendmsg_prep(req, &io); + if (ret) + goto out; + } + + ret = __sys_sendmsg_sock(sock, kmsg, flags); + if (force_nonblock && ret == -EAGAIN) { + copy = kmalloc(sizeof(*copy), GFP_KERNEL); + if (!copy) { + ret = -ENOMEM; + goto out; + } + memcpy(©->msg, &io.msg, sizeof(copy->msg)); + req->io = copy; + memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe)); + req->sqe = &req->io->sqe; + return ret; + } + if (ret == -ERESTARTSYS) + ret = -EINTR; + } + +out: + io_cqring_add_event(req, ret); + if (ret < 0 && (req->flags & REQ_F_LINK)) + req->flags |= REQ_F_FAIL_LINK; + io_put_req_find_next(req, nxt); + return 0; +#else + return -EOPNOTSUPP; +#endif +} + +static int io_recvmsg_prep(struct io_kiocb *req, struct io_async_ctx *io) +{ +#if defined(CONFIG_NET) + const struct io_uring_sqe *sqe = req->sqe; + struct user_msghdr __user *msg; + unsigned flags; + + flags = READ_ONCE(sqe->msg_flags); + msg = (struct user_msghdr __user *)(unsigned long) READ_ONCE(sqe->addr); + return recvmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.uaddr, + &io->msg.iov); +#else + return 0; +#endif +} + +static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_kiocb **nxt, bool force_nonblock) +{ +#if defined(CONFIG_NET) struct socket *sock; int ret; @@ -2006,6 +2106,9 @@ static int 
io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, sock = sock_from_file(req->file, &ret); if (sock) { struct user_msghdr __user *msg; + struct io_async_ctx io, *copy; + struct sockaddr_storage addr; + struct msghdr *kmsg; unsigned flags; flags = READ_ONCE(sqe->msg_flags); @@ -2016,39 +2119,41 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, msg = (struct user_msghdr __user *) (unsigned long) READ_ONCE(sqe->addr); + if (req->io) { + kmsg = &req->io->msg.msg; + kmsg->msg_name = &addr; + } else { + kmsg = &io.msg.msg; + kmsg->msg_name = &addr; + io.msg.iov = io.msg.fast_iov; + ret = io_recvmsg_prep(req, &io); + if (ret) + goto out; + } - ret = fn(sock, msg, flags); - if (force_nonblock && ret == -EAGAIN) + ret = __sys_recvmsg_sock(sock, kmsg, msg, io.msg.uaddr, flags); + if (force_nonblock && ret == -EAGAIN) { + copy = kmalloc(sizeof(*copy), GFP_KERNEL); + if (!copy) { + ret = -ENOMEM; + goto out; + } + memcpy(copy, &io, sizeof(*copy)); + req->io = copy; + memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe)); + req->sqe = &req->io->sqe; return ret; + } if (ret == -ERESTARTSYS) ret = -EINTR; } +out: io_cqring_add_event(req, ret); if (ret < 0 && (req->flags & REQ_F_LINK)) req->flags |= REQ_F_FAIL_LINK; io_put_req_find_next(req, nxt); return 0; -} -#endif - -static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, bool force_nonblock) -{ -#if defined(CONFIG_NET) - return io_send_recvmsg(req, sqe, nxt, force_nonblock, - __sys_sendmsg_sock); -#else - return -EOPNOTSUPP; -#endif -} - -static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **nxt, bool force_nonblock) -{ -#if defined(CONFIG_NET) - return io_send_recvmsg(req, sqe, nxt, force_nonblock, - __sys_recvmsg_sock); #else return -EOPNOTSUPP; #endif @@ -2721,6 +2826,12 @@ static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io) case IORING_OP_WRITE_FIXED: ret = io_write_prep(req, &iovec, &iter, true); break; + case IORING_OP_SENDMSG: + ret = io_sendmsg_prep(req, io); + break; + case IORING_OP_RECVMSG: + ret = io_recvmsg_prep(req, io); + break; default: req->io = io; return 0; diff --git a/include/linux/socket.h b/include/linux/socket.h index 4bde63021c09..903507fb901f 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -378,12 +378,19 @@ extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, bool forbid_cmsg_compat); -extern long __sys_sendmsg_sock(struct socket *sock, - struct user_msghdr __user *msg, +extern long __sys_sendmsg_sock(struct socket *sock, struct msghdr *msg, unsigned int flags); -extern long __sys_recvmsg_sock(struct socket *sock, - struct user_msghdr __user *msg, +extern long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg, + struct user_msghdr __user *umsg, + struct sockaddr __user *uaddr, unsigned int flags); +extern int sendmsg_copy_msghdr(struct msghdr *msg, + struct user_msghdr __user *umsg, unsigned flags, + struct iovec **iov); +extern int recvmsg_copy_msghdr(struct msghdr *msg, + struct user_msghdr __user *umsg, unsigned flags, + struct sockaddr __user **uaddr, + struct iovec **iov); /* helpers which do the actual work for syscalls */ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size, diff --git a/net/socket.c b/net/socket.c index ea28cbb9e2e7..0fb0820edeec 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2346,9 
+2346,9 @@ out: return err; } -static int sendmsg_copy_msghdr(struct msghdr *msg, - struct user_msghdr __user *umsg, unsigned flags, - struct iovec **iov) +int sendmsg_copy_msghdr(struct msghdr *msg, + struct user_msghdr __user *umsg, unsigned flags, + struct iovec **iov) { int err; @@ -2390,27 +2390,14 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, /* * BSD sendmsg interface */ -long __sys_sendmsg_sock(struct socket *sock, struct user_msghdr __user *umsg, +long __sys_sendmsg_sock(struct socket *sock, struct msghdr *msg, unsigned int flags) { - struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; - struct sockaddr_storage address; - struct msghdr msg = { .msg_name = &address }; - ssize_t err; - - err = sendmsg_copy_msghdr(&msg, umsg, flags, &iov); - if (err) - return err; /* disallow ancillary data requests from this path */ - if (msg.msg_control || msg.msg_controllen) { - err = -EINVAL; - goto out; - } + if (msg->msg_control || msg->msg_controllen) + return -EINVAL; - err = ____sys_sendmsg(sock, &msg, flags, NULL, 0); -out: - kfree(iov); - return err; + return ____sys_sendmsg(sock, msg, flags, NULL, 0); } long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned int flags, @@ -2516,10 +2503,10 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, return __sys_sendmmsg(fd, mmsg, vlen, flags, true); } -static int recvmsg_copy_msghdr(struct msghdr *msg, - struct user_msghdr __user *umsg, unsigned flags, - struct sockaddr __user **uaddr, - struct iovec **iov) +int recvmsg_copy_msghdr(struct msghdr *msg, + struct user_msghdr __user *umsg, unsigned flags, + struct sockaddr __user **uaddr, + struct iovec **iov) { ssize_t err; @@ -2609,28 +2596,15 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, * BSD recvmsg interface */ -long __sys_recvmsg_sock(struct socket *sock, struct user_msghdr __user *umsg, - unsigned int flags) +long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg, + struct user_msghdr __user *umsg, + struct sockaddr __user *uaddr, unsigned int flags) { - struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; - struct sockaddr_storage address; - struct msghdr msg = { .msg_name = &address }; - struct sockaddr __user *uaddr; - ssize_t err; - - err = recvmsg_copy_msghdr(&msg, umsg, flags, &uaddr, &iov); - if (err) - return err; /* disallow ancillary data requests from this path */ - if (msg.msg_control || msg.msg_controllen) { - err = -EINVAL; - goto out; - } + if (msg->msg_control || msg->msg_controllen) + return -EINVAL; - err = ____sys_recvmsg(sock, &msg, umsg, uaddr, flags, 0); -out: - kfree(iov); - return err; + return ____sys_recvmsg(sock, msg, umsg, uaddr, flags, 0); } long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned int flags, From f499a021ea8c9f70321fce3d674d8eca5bbeee2c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 2 Dec 2019 16:28:46 -0700 Subject: [PATCH 07/33] io_uring: ensure async punted connect requests copy data Just like commit f67676d160c6 for read/write requests, this one ensures that the sockaddr data has been copied for IORING_OP_CONNECT if we need to punt the request to async context. 
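Reduced to its core, the prep step below is the only place that touches user memory; everything after it works on the kernel-side copy (a sketch mirroring the io_connect_prep() this patch adds):

static int connect_prep_sketch(const struct io_uring_sqe *sqe,
                               struct sockaddr_storage *address)
{
        struct sockaddr __user *addr;
        int addr_len;

        addr = (struct sockaddr __user *)(unsigned long) READ_ONCE(sqe->addr);
        addr_len = READ_ONCE(sqe->addr2);
        /* one copy_from_user, done before any -EAGAIN punt can happen */
        return move_addr_to_kernel(addr, addr_len, address);
}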
Signed-off-by: Jens Axboe --- fs/io_uring.c | 51 ++++++++++++++++++++++++++++++++++++++---- include/linux/socket.h | 5 ++--- net/socket.c | 16 ++++++------- 3 files changed, 57 insertions(+), 15 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 2700382ebcc7..5fcd89c507ec 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -308,6 +308,10 @@ struct io_timeout { struct io_timeout_data *data; }; +struct io_async_connect { + struct sockaddr_storage address; +}; + struct io_async_msghdr { struct iovec fast_iov[UIO_FASTIOV]; struct iovec *iov; @@ -327,6 +331,7 @@ struct io_async_ctx { union { struct io_async_rw rw; struct io_async_msghdr msg; + struct io_async_connect connect; }; }; @@ -2195,11 +2200,26 @@ static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe, #endif } +static int io_connect_prep(struct io_kiocb *req, struct io_async_ctx *io) +{ +#if defined(CONFIG_NET) + const struct io_uring_sqe *sqe = req->sqe; + struct sockaddr __user *addr; + int addr_len; + + addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr); + addr_len = READ_ONCE(sqe->addr2); + return move_addr_to_kernel(addr, addr_len, &io->connect.address); +#else + return 0; +#endif +} + static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe, struct io_kiocb **nxt, bool force_nonblock) { #if defined(CONFIG_NET) - struct sockaddr __user *addr; + struct io_async_ctx __io, *io; unsigned file_flags; int addr_len, ret; @@ -2208,15 +2228,35 @@ static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags) return -EINVAL; - addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr); addr_len = READ_ONCE(sqe->addr2); file_flags = force_nonblock ? 
O_NONBLOCK : 0; - ret = __sys_connect_file(req->file, addr, addr_len, file_flags); - if (ret == -EAGAIN && force_nonblock) + if (req->io) { + io = req->io; + } else { + ret = io_connect_prep(req, &__io); + if (ret) + goto out; + io = &__io; + } + + ret = __sys_connect_file(req->file, &io->connect.address, addr_len, + file_flags); + if (ret == -EAGAIN && force_nonblock) { + io = kmalloc(sizeof(*io), GFP_KERNEL); + if (!io) { + ret = -ENOMEM; + goto out; + } + memcpy(&io->connect, &__io.connect, sizeof(io->connect)); + req->io = io; + memcpy(&io->sqe, req->sqe, sizeof(*req->sqe)); + req->sqe = &io->sqe; return -EAGAIN; + } if (ret == -ERESTARTSYS) ret = -EINTR; +out: if (ret < 0 && (req->flags & REQ_F_LINK)) req->flags |= REQ_F_FAIL_LINK; io_cqring_add_event(req, ret); @@ -2832,6 +2872,9 @@ static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io) case IORING_OP_RECVMSG: ret = io_recvmsg_prep(req, io); break; + case IORING_OP_CONNECT: + ret = io_connect_prep(req, io); + break; default: req->io = io; return 0; diff --git a/include/linux/socket.h b/include/linux/socket.h index 903507fb901f..2d2313403101 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -406,9 +406,8 @@ extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags); extern int __sys_socket(int family, int type, int protocol); extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen); -extern int __sys_connect_file(struct file *file, - struct sockaddr __user *uservaddr, int addrlen, - int file_flags); +extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr, + int addrlen, int file_flags); extern int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen); extern int __sys_listen(int fd, int backlog); diff --git a/net/socket.c b/net/socket.c index 0fb0820edeec..b343db1489bd 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1826,26 +1826,22 @@ SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr, * include the -EINPROGRESS status for such sockets. 
*/ -int __sys_connect_file(struct file *file, struct sockaddr __user *uservaddr, +int __sys_connect_file(struct file *file, struct sockaddr_storage *address, int addrlen, int file_flags) { struct socket *sock; - struct sockaddr_storage address; int err; sock = sock_from_file(file, &err); if (!sock) goto out; - err = move_addr_to_kernel(uservaddr, addrlen, &address); - if (err < 0) - goto out; err = - security_socket_connect(sock, (struct sockaddr *)&address, addrlen); + security_socket_connect(sock, (struct sockaddr *)address, addrlen); if (err) goto out; - err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen, + err = sock->ops->connect(sock, (struct sockaddr *)address, addrlen, sock->file->f_flags | file_flags); out: return err; @@ -1858,7 +1854,11 @@ int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen) f = fdget(fd); if (f.file) { - ret = __sys_connect_file(f.file, uservaddr, addrlen, 0); + struct sockaddr_storage address; + + ret = move_addr_to_kernel(uservaddr, addrlen, &address); + if (!ret) + ret = __sys_connect_file(f.file, &address, addrlen, 0); if (f.flags) fput(f.file); } From da8c96906990f1108cb626ee7865e69267a3263b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 2 Dec 2019 18:51:26 -0700 Subject: [PATCH 08/33] io_uring: mark us with IORING_FEAT_SUBMIT_STABLE If this flag is set, applications can be certain that any data for async offload has been consumed when the kernel has consumed the SQE. Signed-off-by: Jens Axboe --- fs/io_uring.c | 3 ++- include/uapi/linux/io_uring.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 5fcd89c507ec..c47a08afcee5 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5077,7 +5077,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) if (ret < 0) goto err; - p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP; + p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | + IORING_FEAT_SUBMIT_STABLE; trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags); return ret; err: diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 4637ed1d9949..eabccb46edd1 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -157,6 +157,7 @@ struct io_uring_params { */ #define IORING_FEAT_SINGLE_MMAP (1U << 0) #define IORING_FEAT_NODROP (1U << 1) +#define IORING_FEAT_SUBMIT_STABLE (1U << 2) /* * io_uring_register(2) opcodes and arguments From 22efde5998657f6d1f31592c659aa3a9c7ad65f1 Mon Sep 17 00:00:00 2001 From: Jackie Liu Date: Mon, 2 Dec 2019 17:14:52 +0800 Subject: [PATCH 09/33] io_uring: remove parameter ctx of io_submit_state_start The ctx parameter has never been used; clean it up. Signed-off-by: Jackie Liu Signed-off-by: Jens Axboe --- fs/io_uring.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index c47a08afcee5..f7985f270d4a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3396,7 +3396,7 @@ static void io_submit_state_end(struct io_submit_state *state) * Start submission side cache.
*/ static void io_submit_state_start(struct io_submit_state *state, - struct io_ring_ctx *ctx, unsigned max_ios) + unsigned int max_ios) { blk_start_plug(&state->plug); state->free_reqs = 0; @@ -3480,7 +3480,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, return -EBUSY; if (nr > IO_PLUG_THRESHOLD) { - io_submit_state_start(&state, ctx, nr); + io_submit_state_start(&state, nr); statep = &state; } From 8cdda87a4414092cd210e766189cf0353a844861 Mon Sep 17 00:00:00 2001 From: Jackie Liu Date: Mon, 2 Dec 2019 17:14:53 +0800 Subject: [PATCH 10/33] io_uring: remove io_wq_current_is_worker Since commit b18fdf71e01f ("io_uring: simplify io_req_link_next()"), the io_wq_current_is_worker function is no longer needed; clean it up. Signed-off-by: Jackie Liu Signed-off-by: Jens Axboe --- fs/io-wq.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/fs/io-wq.h b/fs/io-wq.h index dd0af0d7376c..892db0bb64b1 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -118,10 +118,6 @@ static inline void io_wq_worker_sleeping(struct task_struct *tsk) static inline void io_wq_worker_running(struct task_struct *tsk) { } -#endif +#endif /* CONFIG_IO_WQ */ -static inline bool io_wq_current_is_worker(void) -{ - return in_task() && (current->flags & PF_IO_WORKER); -} -#endif +#endif /* INTERNAL_IO_WQ_H */ From 795ee49c1a28d1b3eeb2b463f18d557700fc6153 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 30 Nov 2019 23:23:52 +0300 Subject: [PATCH 11/33] block: optimise bvec_iter_advance() bvec_iter_advance() is quite popular, but compilers fail to do proper alias analysis and optimise it well enough. The assembly is checked for gcc 9.2, x86-64. - remove @iter->bi_size from min(...), as @bytes never exceeds it (the WARN_ONCE below guarantees this). Modify it at the beginning and forget about it. - the compiler isn't able to collapse memory dependencies and remove writes in the loop. Help it by explicitly using local vars. Signed-off-by: Arvind Sankar Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- include/linux/bvec.h | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/include/linux/bvec.h b/include/linux/bvec.h index a032f01e928c..679a42253170 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -87,26 +87,24 @@ struct bvec_iter_all { static inline bool bvec_iter_advance(const struct bio_vec *bv, struct bvec_iter *iter, unsigned bytes) { + unsigned int idx = iter->bi_idx; + if (WARN_ONCE(bytes > iter->bi_size, "Attempted to advance past end of bvec iter\n")) { iter->bi_size = 0; return false; } - while (bytes) { - const struct bio_vec *cur = bv + iter->bi_idx; - unsigned len = min3(bytes, iter->bi_size, - cur->bv_len - iter->bi_bvec_done); + iter->bi_size -= bytes; + bytes += iter->bi_bvec_done; - bytes -= len; - iter->bi_size -= len; - iter->bi_bvec_done += len; - - if (iter->bi_bvec_done == cur->bv_len) { - iter->bi_bvec_done = 0; - iter->bi_idx++; - } + while (bytes && bytes >= bv[idx].bv_len) { + bytes -= bv[idx].bv_len; + idx++; } + + iter->bi_idx = idx; + iter->bi_bvec_done = bytes; return true; } From 5c4bd1f40c23d08ffbdccd68a5fd63751c794d89 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Tue, 3 Dec 2019 10:39:01 +0100 Subject: [PATCH 12/33] null_blk: fix zone size parameter check For zoned=1 mode, the zone size must be a power of 2. Check this not only when the zone size is specified during modprobe, but also when creating a zoned null_blk device using configfs.
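The power-of-two requirement exists because the block layer derives zone numbers with shifts instead of divisions; a sketch of the idiom (consistent with the blkdev_nr_zones() arithmetic later in this series; the helper name is illustrative):

#include <linux/log2.h>

static inline unsigned int zone_no(sector_t sector, sector_t zone_sectors)
{
        /* exact only when zone_sectors is a power of two */
        return sector >> ilog2(zone_sectors);
}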
Signed-off-by: Damien Le Moal Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/block/null_blk_main.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 795fda576824..53ba9c7f2786 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1607,7 +1607,7 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set) return blk_mq_alloc_tag_set(set); } -static void null_validate_conf(struct nullb_device *dev) +static int null_validate_conf(struct nullb_device *dev) { dev->blocksize = round_down(dev->blocksize, 512); dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096); @@ -1634,6 +1634,14 @@ static void null_validate_conf(struct nullb_device *dev) /* can not stop a queue */ if (dev->queue_mode == NULL_Q_BIO) dev->mbps = 0; + + if (dev->zoned && + (!dev->zone_size || !is_power_of_2(dev->zone_size))) { + pr_err("zone_size must be power-of-two\n"); + return -EINVAL; + } + + return 0; } #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION @@ -1666,7 +1674,9 @@ static int null_add_dev(struct nullb_device *dev) struct nullb *nullb; int rv; - null_validate_conf(dev); + rv = null_validate_conf(dev); + if (rv) + return rv; nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node); if (!nullb) { @@ -1792,11 +1802,6 @@ static int __init null_init(void) g_bs = PAGE_SIZE; } - if (!is_power_of_2(g_zone_size)) { - pr_err("zone_size must be power-of-two\n"); - return -EINVAL; - } - if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) { pr_err("invalid home_node value\n"); g_home_node = NUMA_NO_NODE; From 979d54475e0b75a28e55528617fecf83b4a221da Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 3 Dec 2019 10:39:02 +0100 Subject: [PATCH 13/33] null_blk: cleanup null_gendisk_register Use a saner size calculation, and do a trivial cleanup on the zone revalidation to prepare for future changes.
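A sketch of the equivalence behind the saner size calculation (dev_size_mb stands in for nullb->dev->size, which is in megabytes):

#include <linux/sizes.h>          /* SZ_1M */
#include <linux/blkdev.h>         /* SECTOR_SHIFT, sector_t */

static sector_t capacity_sectors(unsigned long dev_size_mb)
{
        /* same arithmetic as (size * 1024 * 1024) >> 9, but the named
         * constants document the MB -> bytes -> sectors conversion */
        return ((sector_t)dev_size_mb * SZ_1M) >> SECTOR_SHIFT;
}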
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/block/null_blk_main.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 53ba9c7f2786..dd6026289fbf 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1559,14 +1559,14 @@ static int init_driver_queues(struct nullb *nullb) static int null_gendisk_register(struct nullb *nullb) { + sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT; struct gendisk *disk; - sector_t size; + int ret; disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node); if (!disk) return -ENOMEM; - size = (sector_t)nullb->dev->size * 1024 * 1024ULL; - set_capacity(disk, size >> 9); + set_capacity(disk, size); disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; disk->major = null_major; @@ -1577,9 +1577,8 @@ static int null_gendisk_register(struct nullb *nullb) strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); if (nullb->dev->zoned) { - int ret = blk_revalidate_disk_zones(disk); - - if (ret != 0) + ret = blk_revalidate_disk_zones(disk); + if (ret) return ret; } From bb55628288fcd96d919a9ecc59dd26704a65493b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 3 Dec 2019 10:39:03 +0100 Subject: [PATCH 14/33] block: remove the empty line at the end of blk-zoned.c Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-zoned.c | 1 - 1 file changed, 1 deletion(-) diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 6fad6f3f6980..618786f8275c 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -488,4 +488,3 @@ int blk_revalidate_disk_zones(struct gendisk *disk) return ret; } EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones); - From 9b38bb4b1e6de47b379afaad2c707df639bb4dc7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 3 Dec 2019 10:39:04 +0100 Subject: [PATCH 15/33] block: simplify blkdev_nr_zones Simplify the arguments to blkdev_nr_zones by passing a gendisk instead of the block_device and capacity. This also removes the need for __blkdev_nr_zones as all callers are outside the fast path and can deal with the additional branch. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-zoned.c | 26 ++++++++------------------ block/ioctl.c | 2 +- drivers/md/dm-zoned-target.c | 2 +- include/linux/blkdev.h | 5 ++--- 4 files changed, 12 insertions(+), 23 deletions(-) diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 618786f8275c..65a9bdc9fe27 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -70,30 +70,20 @@ void __blk_req_zone_write_unlock(struct request *rq) } EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock); -static inline unsigned int __blkdev_nr_zones(struct request_queue *q, - sector_t nr_sectors) -{ - sector_t zone_sectors = blk_queue_zone_sectors(q); - - return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors); -} - /** * blkdev_nr_zones - Get number of zones - * @bdev: Target block device + * @disk: Target gendisk * - * Description: - * Return the total number of zones of a zoned block device. - * For a regular block device, the number of zones is always 0. + * Return the total number of zones of a zoned block device. For a block + * device without zone capabilities, the number of zones is always 0. 
*/ -unsigned int blkdev_nr_zones(struct block_device *bdev) +unsigned int blkdev_nr_zones(struct gendisk *disk) { - struct request_queue *q = bdev_get_queue(bdev); + sector_t zone_sectors = blk_queue_zone_sectors(disk->queue); - if (!blk_queue_is_zoned(q)) + if (!blk_queue_is_zoned(disk->queue)) return 0; - - return __blkdev_nr_zones(q, get_capacity(bdev->bd_disk)); + return (get_capacity(disk) + zone_sectors - 1) >> ilog2(zone_sectors); } EXPORT_SYMBOL_GPL(blkdev_nr_zones); @@ -447,7 +437,7 @@ static int blk_update_zone_info(struct gendisk *disk, unsigned int nr_zones, int blk_revalidate_disk_zones(struct gendisk *disk) { struct request_queue *q = disk->queue; - unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk)); + unsigned int nr_zones = blkdev_nr_zones(disk); struct blk_revalidate_zone_args args = { .disk = disk }; int ret = 0; diff --git a/block/ioctl.c b/block/ioctl.c index 7ac8a66c9787..5de98b97af2a 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -512,7 +512,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, case BLKGETZONESZ: return put_uint(arg, bdev_zone_sectors(bdev)); case BLKGETNRZONES: - return put_uint(arg, blkdev_nr_zones(bdev)); + return put_uint(arg, blkdev_nr_zones(bdev->bd_disk)); case HDIO_GETGEO: return blkdev_getgeo(bdev, argp); case BLKRAGET: diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 4574e0dedbd6..70a1063161c0 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -727,7 +727,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path) dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors); dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks); - dev->nr_zones = blkdev_nr_zones(dev->bdev); + dev->nr_zones = blkdev_nr_zones(dev->bdev->bd_disk); dmz->dev = dev; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6012e2592628..c5852de402b6 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -357,8 +357,7 @@ typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, #define BLK_ALL_ZONES ((unsigned int)-1) int blkdev_report_zones(struct block_device *bdev, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); - -extern unsigned int blkdev_nr_zones(struct block_device *bdev); +unsigned int blkdev_nr_zones(struct gendisk *disk); extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op, sector_t sectors, sector_t nr_sectors, gfp_t gfp_mask); @@ -371,7 +370,7 @@ extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode, #else /* CONFIG_BLK_DEV_ZONED */ -static inline unsigned int blkdev_nr_zones(struct block_device *bdev) +static inline unsigned int blkdev_nr_zones(struct gendisk *disk) { return 0; } From f216fdd77b5654f8c4f6fac6020d6aabc58878ef Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 3 Dec 2019 10:39:05 +0100 Subject: [PATCH 16/33] block: replace seq_zones_bitmap with conv_zones_bitmap MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Invert the meaning of seq_zones_bitmap by keeping a bitmap of conventional zones. This allows not having a bitmap for devices that do not have conventional zones. 
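The lookup that results from the inversion, shown in isolation (a sketch of the blk_queue_zone_is_seq() logic the diff below installs):

        if (!blk_queue_is_zoned(q))
                return false;
        if (!q->conv_zones_bitmap)      /* no conventional zones recorded */
                return true;            /* every zone is sequential */
        return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);

A device with only sequential zones never allocates the bitmap at all and still answers correctly.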
Reviewed-by: Javier González Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-zoned.c | 18 +++++++++--------- include/linux/blkdev.h | 14 ++++++++------ 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 65a9bdc9fe27..9c3931051f4f 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -332,15 +332,15 @@ static inline unsigned long *blk_alloc_zone_bitmap(int node, void blk_queue_free_zone_bitmaps(struct request_queue *q) { - kfree(q->seq_zones_bitmap); - q->seq_zones_bitmap = NULL; + kfree(q->conv_zones_bitmap); + q->conv_zones_bitmap = NULL; kfree(q->seq_zones_wlock); q->seq_zones_wlock = NULL; } struct blk_revalidate_zone_args { struct gendisk *disk; - unsigned long *seq_zones_bitmap; + unsigned long *conv_zones_bitmap; unsigned long *seq_zones_wlock; sector_t sector; }; @@ -394,8 +394,8 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, return -ENODEV; } - if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) - set_bit(idx, args->seq_zones_bitmap); + if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) + set_bit(idx, args->conv_zones_bitmap); args->sector += zone->len; return 0; @@ -415,8 +415,8 @@ static int blk_update_zone_info(struct gendisk *disk, unsigned int nr_zones, args->seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones); if (!args->seq_zones_wlock) return -ENOMEM; - args->seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones); - if (!args->seq_zones_bitmap) + args->conv_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones); + if (!args->conv_zones_bitmap) return -ENOMEM; ret = disk->fops->report_zones(disk, 0, nr_zones, @@ -465,7 +465,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk) if (ret >= 0) { q->nr_zones = nr_zones; swap(q->seq_zones_wlock, args.seq_zones_wlock); - swap(q->seq_zones_bitmap, args.seq_zones_bitmap); + swap(q->conv_zones_bitmap, args.conv_zones_bitmap); ret = 0; } else { pr_warn("%s: failed to revalidate zones\n", disk->disk_name); @@ -474,7 +474,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk) blk_mq_unfreeze_queue(q); kfree(args.seq_zones_wlock); - kfree(args.seq_zones_bitmap); + kfree(args.conv_zones_bitmap); return ret; } EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c5852de402b6..503c4d4c5884 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -503,9 +503,9 @@ struct request_queue { /* * Zoned block device information for request dispatch control. * nr_zones is the total number of zones of the device. This is always - * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones - * bits which indicates if a zone is conventional (bit clear) or - * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones + * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones + * bits which indicates if a zone is conventional (bit set) or + * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones * bits which indicates if a zone is write locked, that is, if a write * request targeting the zone was dispatched. All three fields are * initialized by the low level device driver (e.g. scsi/sd.c). @@ -518,7 +518,7 @@ struct request_queue { * blk_mq_unfreeze_queue(). 
*/ unsigned int nr_zones; - unsigned long *seq_zones_bitmap; + unsigned long *conv_zones_bitmap; unsigned long *seq_zones_wlock; #endif /* CONFIG_BLK_DEV_ZONED */ @@ -723,9 +723,11 @@ static inline unsigned int blk_queue_zone_no(struct request_queue *q, static inline bool blk_queue_zone_is_seq(struct request_queue *q, sector_t sector) { - if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap) + if (!blk_queue_is_zoned(q)) return false; - return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap); + if (!q->conv_zones_bitmap) + return true; + return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap); } #else /* CONFIG_BLK_DEV_ZONED */ static inline unsigned int blk_queue_nr_zones(struct request_queue *q) From e94f5819448c5b75829662eaa9c25c17868846cf Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 3 Dec 2019 10:39:06 +0100 Subject: [PATCH 17/33] block: allocate the zone bitmaps lazily MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allocate the conventional zone bitmap and the sequential zone locking bitmap only when we find a zone of the respective type. This avoids wasting memory on the conventional zone bitmap for devices that only have sequential zones, and will also prepare for other future changes. Reviewed-by: Javier González Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-zoned.c | 65 +++++++++++++++++++++++------------------------ 1 file changed, 32 insertions(+), 33 deletions(-) diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 9c3931051f4f..0131f9e14bd1 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -342,6 +342,7 @@ struct blk_revalidate_zone_args { struct gendisk *disk; unsigned long *conv_zones_bitmap; unsigned long *seq_zones_wlock; + unsigned int nr_zones; sector_t sector; }; @@ -385,8 +386,22 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, /* Check zone type */ switch (zone->type) { case BLK_ZONE_TYPE_CONVENTIONAL: + if (!args->conv_zones_bitmap) { + args->conv_zones_bitmap = + blk_alloc_zone_bitmap(q->node, args->nr_zones); + if (!args->conv_zones_bitmap) + return -ENOMEM; + } + set_bit(idx, args->conv_zones_bitmap); + break; case BLK_ZONE_TYPE_SEQWRITE_REQ: case BLK_ZONE_TYPE_SEQWRITE_PREF: + if (!args->seq_zones_wlock) { + args->seq_zones_wlock = + blk_alloc_zone_bitmap(q->node, args->nr_zones); + if (!args->seq_zones_wlock) + return -ENOMEM; + } break; default: pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n", @@ -394,37 +409,10 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, return -ENODEV; } - if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) - set_bit(idx, args->conv_zones_bitmap); - args->sector += zone->len; return 0; } -static int blk_update_zone_info(struct gendisk *disk, unsigned int nr_zones, - struct blk_revalidate_zone_args *args) -{ - /* - * Ensure that all memory allocations in this context are done as - * if GFP_NOIO was specified. 
- */ - unsigned int noio_flag = memalloc_noio_save(); - struct request_queue *q = disk->queue; - int ret; - - args->seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones); - if (!args->seq_zones_wlock) - return -ENOMEM; - args->conv_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones); - if (!args->conv_zones_bitmap) - return -ENOMEM; - - ret = disk->fops->report_zones(disk, 0, nr_zones, - blk_revalidate_zone_cb, args); - memalloc_noio_restore(noio_flag); - return ret; -} - /** * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps * @disk: Target disk @@ -437,8 +425,10 @@ static int blk_update_zone_info(struct gendisk *disk, unsigned int nr_zones, int blk_revalidate_disk_zones(struct gendisk *disk) { struct request_queue *q = disk->queue; - unsigned int nr_zones = blkdev_nr_zones(disk); - struct blk_revalidate_zone_args args = { .disk = disk }; + struct blk_revalidate_zone_args args = { + .disk = disk, + .nr_zones = blkdev_nr_zones(disk), + }; int ret = 0; if (WARN_ON_ONCE(!blk_queue_is_zoned(q))) @@ -449,12 +439,21 @@ int blk_revalidate_disk_zones(struct gendisk *disk) * needs to be updated so that the sysfs exposed value is correct. */ if (!queue_is_mq(q)) { - q->nr_zones = nr_zones; + q->nr_zones = args.nr_zones; return 0; } - if (nr_zones) - ret = blk_update_zone_info(disk, nr_zones, &args); + /* + * Ensure that all memory allocations in this context are done as + * if GFP_NOIO was specified. + */ + if (args.nr_zones) { + unsigned int noio_flag = memalloc_noio_save(); + + ret = disk->fops->report_zones(disk, 0, args.nr_zones, + blk_revalidate_zone_cb, &args); + memalloc_noio_restore(noio_flag); + } /* * Install the new bitmaps, making sure the queue is stopped and @@ -463,7 +462,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk) */ blk_mq_freeze_queue(q); if (ret >= 0) { - q->nr_zones = nr_zones; + q->nr_zones = args.nr_zones; swap(q->seq_zones_wlock, args.seq_zones_wlock); swap(q->conv_zones_bitmap, args.conv_zones_bitmap); ret = 0; From ae58954d8734c44298f55ed71e683ea944994fab Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 3 Dec 2019 10:39:07 +0100 Subject: [PATCH 18/33] block: don't handle bio based drivers in blk_revalidate_disk_zones MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit bio based drivers only need to update q->nr_zones. Do that manually instead of overloading blk_revalidate_disk_zones to keep that function simpler for the next round of changes that will rely even more on the request based functionality. Reviewed-by: Javier González Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-zoned.c | 16 +++++----------- drivers/block/null_blk_main.c | 12 +++++++++--- drivers/md/dm-table.c | 12 +++++++----- include/linux/blkdev.h | 5 ----- 4 files changed, 21 insertions(+), 24 deletions(-) diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 0131f9e14bd1..51d427659ce7 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -419,8 +419,9 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, * * Helper function for low-level device drivers to (re) allocate and initialize * a disk request queue zone bitmaps. This functions should normally be called - * within the disk ->revalidate method. For BIO based queues, no zone bitmap - * is allocated. + * within the disk ->revalidate method for blk-mq based drivers. For BIO based + * drivers only q->nr_zones needs to be updated so that the sysfs exposed value + * is correct. 
*/ int blk_revalidate_disk_zones(struct gendisk *disk) { @@ -433,15 +434,8 @@ int blk_revalidate_disk_zones(struct gendisk *disk) if (WARN_ON_ONCE(!blk_queue_is_zoned(q))) return -EIO; - - /* - * BIO based queues do not use a scheduler so only q->nr_zones - * needs to be updated so that the sysfs exposed value is correct. - */ - if (!queue_is_mq(q)) { - q->nr_zones = args.nr_zones; - return 0; - } + if (WARN_ON_ONCE(!queue_is_mq(q))) + return -EIO; /* * Ensure that all memory allocations in this context are done as diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index dd6026289fbf..068cd0ae6e2c 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1576,11 +1576,17 @@ static int null_gendisk_register(struct nullb *nullb) disk->queue = nullb->q; strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); +#ifdef CONFIG_BLK_DEV_ZONED if (nullb->dev->zoned) { - ret = blk_revalidate_disk_zones(disk); - if (ret) - return ret; + if (queue_is_mq(nullb->q)) { + ret = blk_revalidate_disk_zones(disk); + if (ret) + return ret; + } else { + nullb->q->nr_zones = blkdev_nr_zones(disk); + } } +#endif add_disk(disk); return 0; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 2ae0c1913766..0a2cc197f62b 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1954,12 +1954,14 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, /* * For a zoned target, the number of zones should be updated for the * correct value to be exposed in sysfs queue/nr_zones. For a BIO based - * target, this is all that is needed. For a request based target, the - * queue zone bitmaps must also be updated. - * Use blk_revalidate_disk_zones() to handle this. + * target, this is all that is needed. */ - if (blk_queue_is_zoned(q)) - blk_revalidate_disk_zones(t->md->disk); +#ifdef CONFIG_BLK_DEV_ZONED + if (blk_queue_is_zoned(q)) { + WARN_ON_ONCE(queue_is_mq(q)); + q->nr_zones = blkdev_nr_zones(t->md->disk); + } +#endif /* Allow reads to exceed readahead limits */ q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 503c4d4c5884..47eb22a3b7f9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -375,11 +375,6 @@ static inline unsigned int blkdev_nr_zones(struct gendisk *disk) return 0; } -static inline int blk_revalidate_disk_zones(struct gendisk *disk) -{ - return 0; -} - static inline int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) From 6c6b3549142255c3fe4bab5560efdf8391c8d858 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 3 Dec 2019 10:39:08 +0100 Subject: [PATCH 19/33] block: set the zone size in blk_revalidate_disk_zones atomically MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current zone revalidation code has a major problem in that it doesn't update the zone size and q->nr_zones atomically, leading to a short window where an out of bounds access to the zone arrays is possible. To fix this, move the setting of the zone size into the critical section of blk_revalidate_disk_zones so that it gets updated together with the zone bitmaps and q->nr_zones. This also slightly simplifies the caller, which now deduces the zone size from the report_zones callback. This change also allows checking for a power-of-two zone size in generic code.
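As a rough illustration of the arithmetic this relies on (not part of the patch; is_power_of_2() and ilog2() are re-implemented here as userspace stand-ins), a power-of-two zone size lets the zone count be derived with a shift instead of a 64-bit division:

#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for the kernel's is_power_of_2()/ilog2() */
static int is_power_of_2(uint64_t n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

static unsigned int ilog2_u64(uint64_t n)
{
    return 63 - (unsigned int)__builtin_clzll(n);
}

int main(void)
{
    uint64_t capacity = 8 * 1024 * 1024; /* device size in 512B sectors (4 GiB) */
    uint64_t zone_len = 524288;          /* zone size in sectors (256 MiB) */

    if (!is_power_of_2(zone_len))
        return 1; /* the patch rejects such devices with -ENODEV */

    /* same rounding as the patch: a smaller last zone still counts */
    uint64_t nr_zones = (capacity + zone_len - 1) >> ilog2_u64(zone_len);

    printf("%llu zones\n", (unsigned long long)nr_zones);
    return 0;
}

This is why the callback validates the first zone's length up front before computing nr_zones from it.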
Reported-by: Hans Holmberg Reviewed-by: Javier González Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-zoned.c | 59 ++++++++++++++++++++--------------- drivers/block/null_blk_main.c | 3 +- drivers/scsi/sd_zbc.c | 2 -- 3 files changed, 35 insertions(+), 29 deletions(-) diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 51d427659ce7..d00fcfd71dfe 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -343,6 +343,7 @@ struct blk_revalidate_zone_args { unsigned long *conv_zones_bitmap; unsigned long *seq_zones_wlock; unsigned int nr_zones; + sector_t zone_sectors; sector_t sector; }; @@ -355,25 +356,33 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, struct blk_revalidate_zone_args *args = data; struct gendisk *disk = args->disk; struct request_queue *q = disk->queue; - sector_t zone_sectors = blk_queue_zone_sectors(q); sector_t capacity = get_capacity(disk); /* * All zones must have the same size, with the exception on an eventual * smaller last zone. */ - if (zone->start + zone_sectors < capacity && - zone->len != zone_sectors) { - pr_warn("%s: Invalid zoned device with non constant zone size\n", - disk->disk_name); - return false; - } + if (zone->start == 0) { + if (zone->len == 0 || !is_power_of_2(zone->len)) { + pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n", + disk->disk_name, zone->len); + return -ENODEV; + } - if (zone->start + zone->len >= capacity && - zone->len > zone_sectors) { - pr_warn("%s: Invalid zoned device with larger last zone size\n", - disk->disk_name); - return -ENODEV; + args->zone_sectors = zone->len; + args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len); + } else if (zone->start + args->zone_sectors < capacity) { + if (zone->len != args->zone_sectors) { + pr_warn("%s: Invalid zoned device with non constant zone size\n", + disk->disk_name); + return -ENODEV; + } + } else { + if (zone->len > args->zone_sectors) { + pr_warn("%s: Invalid zoned device with larger last zone size\n", + disk->disk_name); + return -ENODEV; + } } /* Check for holes in the zone report */ @@ -428,9 +437,9 @@ int blk_revalidate_disk_zones(struct gendisk *disk) struct request_queue *q = disk->queue; struct blk_revalidate_zone_args args = { .disk = disk, - .nr_zones = blkdev_nr_zones(disk), }; - int ret = 0; + unsigned int noio_flag; + int ret; if (WARN_ON_ONCE(!blk_queue_is_zoned(q))) return -EIO; @@ -438,24 +447,22 @@ int blk_revalidate_disk_zones(struct gendisk *disk) return -EIO; /* - * Ensure that all memory allocations in this context are done as - * if GFP_NOIO was specified. + * Ensure that all memory allocations in this context are done as if + * GFP_NOIO was specified. */ - if (args.nr_zones) { - unsigned int noio_flag = memalloc_noio_save(); - - ret = disk->fops->report_zones(disk, 0, args.nr_zones, - blk_revalidate_zone_cb, &args); - memalloc_noio_restore(noio_flag); - } + noio_flag = memalloc_noio_save(); + ret = disk->fops->report_zones(disk, 0, UINT_MAX, + blk_revalidate_zone_cb, &args); + memalloc_noio_restore(noio_flag); /* - * Install the new bitmaps, making sure the queue is stopped and - * all I/Os are completed (i.e. a scheduler is not referencing the - * bitmaps). + * Install the new bitmaps and update nr_zones only once the queue is + * stopped and all I/Os are completed (i.e. a scheduler is not + * referencing the bitmaps). 
*/ blk_mq_freeze_queue(q); if (ret >= 0) { + blk_queue_chunk_sectors(q, args.zone_sectors); q->nr_zones = args.nr_zones; swap(q->seq_zones_wlock, args.seq_zones_wlock); swap(q->conv_zones_bitmap, args.conv_zones_bitmap); diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 068cd0ae6e2c..997b7dc095b9 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1583,6 +1583,8 @@ static int null_gendisk_register(struct nullb *nullb) if (ret) return ret; } else { + blk_queue_chunk_sectors(nullb->q, + nullb->dev->zone_size_sects); nullb->q->nr_zones = blkdev_nr_zones(disk); } } @@ -1746,7 +1748,6 @@ static int null_add_dev(struct nullb_device *dev) if (rv) goto out_cleanup_blk_queue; - blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects); nullb->q->limits.zoned = BLK_ZONED_HM; blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, nullb->q); blk_queue_required_elevator_features(nullb->q, diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 0e5ede48f045..27d72c1d4654 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -412,8 +412,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) goto err; /* The drive satisfies the kernel restrictions: set it up */ - blk_queue_chunk_sectors(sdkp->disk->queue, - logical_to_sectors(sdkp->device, zone_blocks)); blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, sdkp->disk->queue); blk_queue_required_elevator_features(sdkp->disk->queue, ELEVATOR_F_ZBD_SEQ_WRITE); From 87f80d623c6c93c721b2aaead8a45e848bc8ffbf Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 3 Dec 2019 11:23:54 -0700 Subject: [PATCH 20/33] io_uring: handle connect -EINPROGRESS like -EAGAIN Right now we return it to userspace, which means the application has to poll for the socket to be writeable. Let's just treat it like -EAGAIN and have io_uring handle it internally; this makes it much easier to use. Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index f7985f270d4a..6c22a277904e 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2242,7 +2242,7 @@ static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe, ret = __sys_connect_file(req->file, &io->connect.address, addr_len, file_flags); - if (ret == -EAGAIN && force_nonblock) { + if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) { io = kmalloc(sizeof(*io), GFP_KERNEL); if (!io) { ret = -ENOMEM; From f9bd84a8a845d82f9b5a081a7ae68c98a11d2e84 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 26 Nov 2019 16:36:05 +0100 Subject: [PATCH 21/33] xen/blkback: Avoid unmapping unmapped grant pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For each I/O request, blkback first maps the foreign pages for the request to its local pages. If an allocation of a local page for the mapping fails, it should unmap every mapping already made for the request. However, blkback's handling mechanism for the allocation failure does not mark the remaining foreign pages as unmapped. Therefore, the unmap function merely tries to unmap every valid grant page for the request, including the pages not mapped due to the allocation failure. On a system that fails the allocation frequently, this problem leads to the following kernel crash.
[ 372.012538] BUG: unable to handle kernel NULL pointer dereference at 0000000000000001 [ 372.012546] IP: [] gnttab_unmap_refs.part.7+0x1c/0x40 [ 372.012557] PGD 16f3e9067 PUD 16426e067 PMD 0 [ 372.012562] Oops: 0002 [#1] SMP [ 372.012566] Modules linked in: act_police sch_ingress cls_u32 ... [ 372.012746] Call Trace: [ 372.012752] [] gnttab_unmap_refs+0x34/0x40 [ 372.012759] [] xen_blkbk_unmap+0x83/0x150 [xen_blkback] ... [ 372.012802] [] dispatch_rw_block_io+0x970/0x980 [xen_blkback] ... Decompressing Linux... Parsing ELF... done. Booting the kernel. [ 0.000000] Initializing cgroup subsys cpuset This commit fixes this problem by marking the grant pages of the given request that weren't mapped due to the allocation failure as invalid. Fixes: c6cc142dac52 ("xen-blkback: use balloon pages for all mappings") Reviewed-by: David Woodhouse Reviewed-by: Maximilian Heyne Reviewed-by: Paul Durrant Reviewed-by: Roger Pau Monné Signed-off-by: SeongJae Park Signed-off-by: Jens Axboe --- drivers/block/xen-blkback/blkback.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index fd1e19f1a49f..3666afa639d1 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -936,6 +936,8 @@ next: out_of_memory: pr_alert("%s: out of memory\n", __func__); put_free_pages(ring, pages_to_gnt, segs_to_map); + for (i = last_map; i < num; i++) + pages[i]->handle = BLKBACK_INVALID_HANDLE; return -ENOMEM; } From 36582a5a456100ebe4983e3d63b8cbc7e62a0ddc Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Wed, 4 Dec 2019 19:31:14 +0800 Subject: [PATCH 22/33] brd: remove max_hw_sectors queue limit Now we depend on blk_queue_split() to respect most of the queue limits (the only exception could be dma alignment); however, blk_queue_split() isn't used for brd, so this limit hasn't been respected since v4.3. The max_hw_sectors limit also doesn't play a big role for brd, and it has been set since brd was first added to the tree, for no clear reason. So remove it. Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- drivers/block/brd.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/block/brd.c b/drivers/block/brd.c index c548a5a6c1a0..c2e5b2ad88bc 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -382,7 +382,6 @@ static struct brd_device *brd_alloc(int i) goto out_free_dev; blk_queue_make_request(brd->brd_queue, brd_make_request); - blk_queue_max_hw_sectors(brd->brd_queue, 1024); /* This is so fdisk will align partitions on 4k, because of * direct_access API needing 4k alignment, returning a PFN From f1acbf2186dfe761a05ce35c0f36246caed44403 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Wed, 4 Dec 2019 19:31:15 +0800 Subject: [PATCH 23/33] brd: warn on un-aligned buffer The queue dma alignment limit requires users (fs, target, ...) of the block layer to pass aligned buffers. So far brd doesn't support un-aligned buffers, even though that would be easy to support. However, brd is often used for debugging, and there are other drivers which can't support un-aligned buffers either. So add a warning so that brd users know what to fix.
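To show what a correctly aligned buffer looks like from userspace, here is a minimal sketch (illustrative only; /dev/ram0 as the brd device and a 512-byte dma alignment are assumptions) of an O_DIRECT read that would not trip the new warning:

#define _GNU_SOURCE /* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    void *buf;
    int fd;

    /* 512-byte (sector) alignment for both buffer address and length */
    if (posix_memalign(&buf, 512, 4096))
        return 1;

    fd = open("/dev/ram0", O_RDONLY | O_DIRECT);
    if (fd < 0)
        return 1;

    /* aligned offset and length: no WARN_ON_ONCE from brd */
    if (read(fd, buf, 4096) < 0)
        return 1;

    close(fd);
    free(buf);
    return 0;
}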
Reported-by: Stephen Rust Cc: Stephen Rust Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- drivers/block/brd.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/block/brd.c b/drivers/block/brd.c index c2e5b2ad88bc..a8730cc4db10 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -297,6 +297,10 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) unsigned int len = bvec.bv_len; int err; + /* Don't support un-aligned buffer */ + WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) || + (len & (SECTOR_SIZE - 1))); + err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset, bio_op(bio), sector); if (err) From bca1c43cb2dbe4212aea0793bfd91aeb4c2d184d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 4 Dec 2019 09:17:41 -0700 Subject: [PATCH 24/33] null_blk: remove unused variable warning on !CONFIG_BLK_DEV_ZONED If BLK_DEV_ZONED isn't set, 'ret' isn't used. This makes gcc complain, rightfully. Move ret where it is used. Fixes: 979d54475e0b ("null_blk: cleanup null_gendisk_register") Signed-off-by: Jens Axboe --- drivers/block/null_blk_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 997b7dc095b9..ae8d4bc532b0 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1561,7 +1561,6 @@ static int null_gendisk_register(struct nullb *nullb) { sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT; struct gendisk *disk; - int ret; disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node); if (!disk) @@ -1579,7 +1578,7 @@ static int null_gendisk_register(struct nullb *nullb) #ifdef CONFIG_BLK_DEV_ZONED if (nullb->dev->zoned) { if (queue_is_mq(nullb->q)) { - ret = blk_revalidate_disk_zones(disk); + int ret = blk_revalidate_disk_zones(disk); if (ret) return ret; } else { From 901e59bba9ddad4bc6994ecb8598ea60a993da4c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 4 Dec 2019 10:34:03 -0700 Subject: [PATCH 25/33] io_uring: allow IO_SQE_* flags on IORING_OP_TIMEOUT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There's really no reason why we forbid things like link/drain etc on regular timeout commands. Enable the usual SQE flags on timeouts. Reported-by: 李通洲 Signed-off-by: Jens Axboe --- fs/io_uring.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 6c22a277904e..00f119bdd8ff 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2703,9 +2703,6 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) int ret; ret = io_timeout_setup(req); - /* common setup allows flags (like links) set, we don't */ - if (!ret && sqe->flags) - ret = -EINVAL; if (ret) return ret; From 2d28390aff879238f00e209e38c2a0b78717360e Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 4 Dec 2019 11:08:05 -0700 Subject: [PATCH 26/33] io_uring: ensure deferred timeouts copy necessary data If we defer a timeout, we should ensure that we copy the timespec when we have consumed the sqe. This is similar to commit f67676d160c6 for read/write requests. We already did this correctly for timeouts deferred as links, but do it generally and use the infrastructure added by commit 1a6b74fc8702 instead of having the timeout deferral use its own. 
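For context, a sketch of how a timeout is armed via liburing (liburing usage assumed; not part of this patch) shows why the copy matters: the timespec lives in caller memory that can vanish right after submission, so a deferred request must not keep pointing at it.

#include <liburing.h>

/*
 * The timespec for IORING_OP_TIMEOUT is only referenced by the SQE.
 * If the kernel defers the request, it must already have taken its
 * own copy: nothing stops this function from returning and 'ts'
 * from disappearing right after io_uring_submit().
 */
static int arm_timeout(struct io_uring *ring)
{
    struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
    struct io_uring_sqe *sqe;

    sqe = io_uring_get_sqe(ring);
    if (!sqe)
        return -1;

    io_uring_prep_timeout(sqe, &ts, 0, 0);
    return io_uring_submit(ring);
}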
Signed-off-by: Jens Axboe --- fs/io_uring.c | 83 ++++++++++++++++++++++++++------------------------- 1 file changed, 42 insertions(+), 41 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 00f119bdd8ff..2efe1ac7352a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -303,11 +303,6 @@ struct io_timeout_data { u32 seq_offset; }; -struct io_timeout { - struct file *file; - struct io_timeout_data *data; -}; - struct io_async_connect { struct sockaddr_storage address; }; @@ -332,6 +327,7 @@ struct io_async_ctx { struct io_async_rw rw; struct io_async_msghdr msg; struct io_async_connect connect; + struct io_timeout_data timeout; }; }; @@ -346,7 +342,6 @@ struct io_kiocb { struct file *file; struct kiocb rw; struct io_poll_iocb poll; - struct io_timeout timeout; }; const struct io_uring_sqe *sqe; @@ -619,7 +614,7 @@ static void io_kill_timeout(struct io_kiocb *req) { int ret; - ret = hrtimer_try_to_cancel(&req->timeout.data->timer); + ret = hrtimer_try_to_cancel(&req->io->timeout.timer); if (ret != -1) { atomic_inc(&req->ctx->cq_timeouts); list_del_init(&req->list); @@ -877,8 +872,6 @@ static void __io_free_req(struct io_kiocb *req) wake_up(&ctx->inflight_wait); spin_unlock_irqrestore(&ctx->inflight_lock, flags); } - if (req->flags & REQ_F_TIMEOUT) - kfree(req->timeout.data); percpu_ref_put(&ctx->refs); if (likely(!io_is_fallback_req(req))) kmem_cache_free(req_cachep, req); @@ -891,7 +884,7 @@ static bool io_link_cancel_timeout(struct io_kiocb *req) struct io_ring_ctx *ctx = req->ctx; int ret; - ret = hrtimer_try_to_cancel(&req->timeout.data->timer); + ret = hrtimer_try_to_cancel(&req->io->timeout.timer); if (ret != -1) { io_cqring_fill_event(req, -ECANCELED); io_commit_cqring(ctx); @@ -2618,7 +2611,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data) if (ret == -ENOENT) return ret; - ret = hrtimer_try_to_cancel(&req->timeout.data->timer); + ret = hrtimer_try_to_cancel(&req->io->timeout.timer); if (ret == -1) return -EALREADY; @@ -2660,7 +2653,8 @@ static int io_timeout_remove(struct io_kiocb *req, return 0; } -static int io_timeout_setup(struct io_kiocb *req) +static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io, + bool is_timeout_link) { const struct io_uring_sqe *sqe = req->sqe; struct io_timeout_data *data; @@ -2670,15 +2664,14 @@ static int io_timeout_setup(struct io_kiocb *req) return -EINVAL; if (sqe->ioprio || sqe->buf_index || sqe->len != 1) return -EINVAL; + if (sqe->off && is_timeout_link) + return -EINVAL; flags = READ_ONCE(sqe->timeout_flags); if (flags & ~IORING_TIMEOUT_ABS) return -EINVAL; - data = kzalloc(sizeof(struct io_timeout_data), GFP_KERNEL); - if (!data) - return -ENOMEM; + data = &io->timeout; data->req = req; - req->timeout.data = data; req->flags |= REQ_F_TIMEOUT; if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr))) @@ -2690,6 +2683,7 @@ static int io_timeout_setup(struct io_kiocb *req) data->mode = HRTIMER_MODE_REL; hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode); + req->io = io; return 0; } @@ -2698,13 +2692,24 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) unsigned count; struct io_ring_ctx *ctx = req->ctx; struct io_timeout_data *data; + struct io_async_ctx *io; struct list_head *entry; unsigned span = 0; - int ret; - ret = io_timeout_setup(req); - if (ret) - return ret; + io = req->io; + if (!io) { + int ret; + + io = kmalloc(sizeof(*io), GFP_KERNEL); + if (!io) + return -ENOMEM; + ret = io_timeout_prep(req, io, false); + if (ret) { + kfree(io); + return ret; + } + } 
+ data = &req->io->timeout; /* * sqe->off holds how many events that need to occur for this @@ -2720,7 +2725,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) } req->sequence = ctx->cached_sq_head + count - 1; - req->timeout.data->seq_offset = count; + data->seq_offset = count; /* * Insertion sort, ensuring the first entry in the list is always @@ -2731,7 +2736,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list); unsigned nxt_sq_head; long long tmp, tmp_nxt; - u32 nxt_offset = nxt->timeout.data->seq_offset; + u32 nxt_offset = nxt->io->timeout.seq_offset; if (nxt->flags & REQ_F_TIMEOUT_NOSEQ) continue; @@ -2764,7 +2769,6 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) req->sequence -= span; add: list_add(&req->list, entry); - data = req->timeout.data; data->timer.function = io_timeout_fn; hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode); spin_unlock_irq(&ctx->completion_lock); @@ -2872,6 +2876,10 @@ static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io) case IORING_OP_CONNECT: ret = io_connect_prep(req, io); break; + case IORING_OP_TIMEOUT: + return io_timeout_prep(req, io, false); + case IORING_OP_LINK_TIMEOUT: + return io_timeout_prep(req, io, true); default: req->io = io; return 0; @@ -2899,17 +2907,18 @@ static int io_req_defer(struct io_kiocb *req) if (!io) return -EAGAIN; + ret = io_req_defer_prep(req, io); + if (ret < 0) { + kfree(io); + return ret; + } + spin_lock_irq(&ctx->completion_lock); if (!req_need_defer(req) && list_empty(&ctx->defer_list)) { spin_unlock_irq(&ctx->completion_lock); - kfree(io); return 0; } - ret = io_req_defer_prep(req, io); - if (ret < 0) - return ret; - trace_io_uring_defer(ctx, req, req->user_data); list_add_tail(&req->list, &ctx->defer_list); spin_unlock_irq(&ctx->completion_lock); @@ -3198,7 +3207,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req) */ spin_lock_irq(&ctx->completion_lock); if (!list_empty(&req->list)) { - struct io_timeout_data *data = req->timeout.data; + struct io_timeout_data *data = &req->io->timeout; data->timer.function = io_link_timeout_fn; hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), @@ -3345,17 +3354,6 @@ err_req: if (req->sqe->flags & IOSQE_IO_DRAIN) (*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN; - if (READ_ONCE(req->sqe->opcode) == IORING_OP_LINK_TIMEOUT) { - ret = io_timeout_setup(req); - /* common setup allows offset being set, we don't */ - if (!ret && req->sqe->off) - ret = -EINVAL; - if (ret) { - prev->flags |= REQ_F_FAIL_LINK; - goto err_req; - } - } - io = kmalloc(sizeof(*io), GFP_KERNEL); if (!io) { ret = -EAGAIN; @@ -3363,8 +3361,11 @@ err_req: } ret = io_req_defer_prep(req, io); - if (ret) + if (ret) { + kfree(io); + prev->flags |= REQ_F_FAIL_LINK; goto err_req; + } trace_io_uring_link(ctx, req, prev); list_add_tail(&req->list, &prev->link_list); } else if (req->sqe->flags & IOSQE_IO_LINK) { From 08bdcc35f00c91b325195723cceaba0937a89ddf Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 4 Dec 2019 17:19:44 -0700 Subject: [PATCH 27/33] io-wq: clear node->next on list deletion If someone removes a node from a list, and then later adds it back to a list, we can have invalid data in ->next. This can cause all sorts of issues. One such use case is the IORING_OP_POLL_ADD command, which will do just that if we race and get woken twice without any pending events. 
This is a pretty rare case, but can happen under extreme loads. Dan reports that he saw the following crash: BUG: kernel NULL pointer dereference, address: 0000000000000000 PGD d283ce067 P4D d283ce067 PUD e5ca04067 PMD 0 Oops: 0002 [#1] SMP CPU: 17 PID: 10726 Comm: tao:fast-fiber Kdump: loaded Not tainted 5.2.9-02851-gac7bc042d2d1 #116 Hardware name: Quanta Twin Lakes MP/Twin Lakes Passive MP, BIOS F09_3A17 05/03/2019 RIP: 0010:io_wqe_enqueue+0x3e/0xd0 Code: 34 24 74 55 8b 47 58 48 8d 6f 50 85 c0 74 50 48 89 df e8 35 7c 75 00 48 83 7b 08 00 48 8b 14 24 0f 84 84 00 00 00 48 8b 4b 10 <48> 89 11 48 89 53 10 83 63 20 fe 48 89 c6 48 89 df e8 0c 7a 75 00 RSP: 0000:ffffc90006858a08 EFLAGS: 00010082 RAX: 0000000000000002 RBX: ffff889037492fc0 RCX: 0000000000000000 RDX: ffff888e40cc11a8 RSI: ffff888e40cc11a8 RDI: ffff889037492fc0 RBP: ffff889037493010 R08: 00000000000000c3 R09: ffffc90006858ab8 R10: 0000000000000000 R11: 0000000000000000 R12: ffff888e40cc11a8 R13: 0000000000000000 R14: 00000000000000c3 R15: ffff888e40cc1100 FS: 00007fcddc9db700(0000) GS:ffff88903fa40000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 0000000e479f5003 CR4: 00000000007606e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: io_poll_wake+0x12f/0x2a0 __wake_up_common+0x86/0x120 __wake_up_common_lock+0x7a/0xc0 sock_def_readable+0x3c/0x70 tcp_rcv_established+0x557/0x630 tcp_v6_do_rcv+0x118/0x3c0 tcp_v6_rcv+0x97e/0x9d0 ip6_protocol_deliver_rcu+0xe3/0x440 ip6_input+0x3d/0xc0 ? ip6_protocol_deliver_rcu+0x440/0x440 ipv6_rcv+0x56/0xd0 ? ip6_rcv_finish_core.isra.18+0x80/0x80 __netif_receive_skb_one_core+0x50/0x70 netif_receive_skb_internal+0x2f/0xa0 napi_gro_receive+0x125/0x150 mlx5e_handle_rx_cqe+0x1d9/0x5a0 ? mlx5e_poll_tx_cq+0x305/0x560 mlx5e_poll_rx_cq+0x49f/0x9c5 mlx5e_napi_poll+0xee/0x640 ? smp_reschedule_interrupt+0x16/0xd0 ? reschedule_interrupt+0xf/0x20 net_rx_action+0x286/0x3d0 __do_softirq+0xca/0x297 irq_exit+0x96/0xa0 do_IRQ+0x54/0xe0 common_interrupt+0xf/0xf RIP: 0033:0x7fdc627a2e3a Code: 31 c0 85 d2 0f 88 f6 00 00 00 55 48 89 e5 41 57 41 56 4c 63 f2 41 55 41 54 53 48 83 ec 18 48 85 ff 0f 84 c7 00 00 00 48 8b 07 <41> 89 d4 49 89 f5 48 89 fb 48 85 c0 0f 84 64 01 00 00 48 83 78 10 when running a networked workload with about 5000 sockets being polled for. Fix this by clearing node->next when the node is being removed from the list. Fixes: 6206f0e180d4 ("io-wq: shrink io_wq_work a bit") Reported-by: Dan Melnic Signed-off-by: Jens Axboe --- fs/io-wq.h | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/io-wq.h b/fs/io-wq.h index 892db0bb64b1..7c333a28e2a7 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -52,6 +52,7 @@ static inline void wq_node_del(struct io_wq_work_list *list, list->last = prev; if (prev) prev->next = node->next; + node->next = NULL; } #define wq_list_for_each(pos, prv, head) \ From 78076bb64aa8ba5b7207c38b2660a9e10ffa8cc7 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 4 Dec 2019 19:56:40 -0700 Subject: [PATCH 28/33] io_uring: use hash table for poll command lookups We recently changed this from a single list to an rbtree, but for some real life workloads, the rbtree slows down the submission/insertion case enough so that it's the top cycle consumer on the io_uring side. In testing, using a hash table is a more well rounded compromise. 
It is fast for insertion, and as long as it's sized appropriately, it works well for the cancellation case as well. Running TAO with a lot of network sockets, this removes io_poll_req_insert() from spending 2% of the CPU cycles. Reported-by: Dan Melnic Signed-off-by: Jens Axboe --- fs/io_uring.c | 82 +++++++++++++++++++++++++-------------------------- 1 file changed, 40 insertions(+), 42 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 2efe1ac7352a..8fa6b190a238 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -275,7 +275,8 @@ struct io_ring_ctx { * manipulate the list, hence no extra locking is needed there. */ struct list_head poll_list; - struct rb_root cancel_tree; + struct hlist_head *cancel_hash; + unsigned cancel_hash_bits; spinlock_t inflight_lock; struct list_head inflight_list; @@ -355,7 +356,7 @@ struct io_kiocb { struct io_ring_ctx *ctx; union { struct list_head list; - struct rb_node rb_node; + struct hlist_node hash_node; }; struct list_head link_list; unsigned int flags; @@ -444,6 +445,7 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref) static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) { struct io_ring_ctx *ctx; + int hash_bits; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -457,6 +459,21 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) if (!ctx->completions) goto err; + /* + * Use 5 bits less than the max cq entries, that should give us around + * 32 entries per hash list if totally full and uniformly spread. + */ + hash_bits = ilog2(p->cq_entries); + hash_bits -= 5; + if (hash_bits <= 0) + hash_bits = 1; + ctx->cancel_hash_bits = hash_bits; + ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head), + GFP_KERNEL); + if (!ctx->cancel_hash) + goto err; + __hash_init(ctx->cancel_hash, 1U << hash_bits); + if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) goto err; @@ -470,7 +487,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) init_waitqueue_head(&ctx->wait); spin_lock_init(&ctx->completion_lock); INIT_LIST_HEAD(&ctx->poll_list); - ctx->cancel_tree = RB_ROOT; INIT_LIST_HEAD(&ctx->defer_list); INIT_LIST_HEAD(&ctx->timeout_list); init_waitqueue_head(&ctx->inflight_wait); @@ -481,6 +497,7 @@ err: if (ctx->fallback_req) kmem_cache_free(req_cachep, ctx->fallback_req); kfree(ctx->completions); + kfree(ctx->cancel_hash); kfree(ctx); return NULL; } @@ -2260,14 +2277,6 @@ out: #endif } -static inline void io_poll_remove_req(struct io_kiocb *req) -{ - if (!RB_EMPTY_NODE(&req->rb_node)) { - rb_erase(&req->rb_node, &req->ctx->cancel_tree); - RB_CLEAR_NODE(&req->rb_node); - } -} - static void io_poll_remove_one(struct io_kiocb *req) { struct io_poll_iocb *poll = &req->poll; @@ -2279,36 +2288,34 @@ static void io_poll_remove_one(struct io_kiocb *req) io_queue_async_work(req); } spin_unlock(&poll->head->lock); - io_poll_remove_req(req); + hash_del(&req->hash_node); } static void io_poll_remove_all(struct io_ring_ctx *ctx) { - struct rb_node *node; + struct hlist_node *tmp; struct io_kiocb *req; + int i; spin_lock_irq(&ctx->completion_lock); - while ((node = rb_first(&ctx->cancel_tree)) != NULL) { - req = rb_entry(node, struct io_kiocb, rb_node); - io_poll_remove_one(req); + for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { + struct hlist_head *list; + + list = &ctx->cancel_hash[i]; + hlist_for_each_entry_safe(req, tmp, list, hash_node) + io_poll_remove_one(req); } spin_unlock_irq(&ctx->completion_lock); } static int 
io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr) { - struct rb_node *p, *parent = NULL; + struct hlist_head *list; struct io_kiocb *req; - p = ctx->cancel_tree.rb_node; - while (p) { - parent = p; - req = rb_entry(parent, struct io_kiocb, rb_node); - if (sqe_addr < req->user_data) { - p = p->rb_left; - } else if (sqe_addr > req->user_data) { - p = p->rb_right; - } else { + list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)]; + hlist_for_each_entry(req, list, hash_node) { + if (sqe_addr == req->user_data) { io_poll_remove_one(req); return 0; } @@ -2390,7 +2397,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr) spin_unlock_irq(&ctx->completion_lock); return; } - io_poll_remove_req(req); + hash_del(&req->hash_node); io_poll_complete(req, mask, ret); spin_unlock_irq(&ctx->completion_lock); @@ -2425,7 +2432,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, * for finalizing the request, mark us as having grabbed that already. */ if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) { - io_poll_remove_req(req); + hash_del(&req->hash_node); io_poll_complete(req, mask, 0); req->flags |= REQ_F_COMP_LOCKED; io_put_req(req); @@ -2463,20 +2470,10 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, static void io_poll_req_insert(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - struct rb_node **p = &ctx->cancel_tree.rb_node; - struct rb_node *parent = NULL; - struct io_kiocb *tmp; + struct hlist_head *list; - while (*p) { - parent = *p; - tmp = rb_entry(parent, struct io_kiocb, rb_node); - if (req->user_data < tmp->user_data) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&req->rb_node, parent, p); - rb_insert_color(&req->rb_node, &ctx->cancel_tree); + list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)]; + hlist_add_head(&req->hash_node, list); } static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe, @@ -2504,7 +2501,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe, INIT_IO_WORK(&req->work, io_poll_complete_work); events = READ_ONCE(sqe->poll_events); poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; - RB_CLEAR_NODE(&req->rb_node); + INIT_HLIST_NODE(&req->hash_node); poll->head = NULL; poll->done = false; @@ -4644,6 +4641,7 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) free_uid(ctx->user); put_cred(ctx->creds); kfree(ctx->completions); + kfree(ctx->cancel_hash); kmem_cache_free(req_cachep, ctx->fallback_req); kfree(ctx); } From 2e6e1fde32d7d41cf076c21060c329d3fdbce25c Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 5 Dec 2019 16:15:45 +0300 Subject: [PATCH 29/33] io_uring: fix error handling in io_queue_link_head In case of an error, io_submit_sqe() drops a request and continues without it, even if the request was part of a link. Not only does it fail to cancel links, it may also execute the wrong sequence of actions. Stop consuming sqes, and let the user handle errors.
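For reference, the userspace contract this preserves can be sketched with liburing (liburing usage assumed; SQE-exhaustion handling trimmed): in a linked chain, a failed member surfaces its error on its own CQE and the dependent request completes with -ECANCELED, rather than the kernel silently skipping the failed step and running the rest out of order.

#include <liburing.h>
#include <sys/uio.h>

/* Two linked SQEs: the write only runs if the read succeeds. */
static void queue_linked_pair(struct io_uring *ring, int in_fd, int out_fd,
                              struct iovec *iov)
{
    struct io_uring_sqe *sqe;

    sqe = io_uring_get_sqe(ring);
    io_uring_prep_readv(sqe, in_fd, iov, 1, 0);
    sqe->flags |= IOSQE_IO_LINK; /* chain to the next SQE */

    sqe = io_uring_get_sqe(ring);
    io_uring_prep_writev(sqe, out_fd, iov, 1, 0);

    io_uring_submit(ring);
}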
Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 8fa6b190a238..1fca9e2b6e64 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3315,7 +3315,7 @@ static inline void io_queue_link_head(struct io_kiocb *req) #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK) -static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state, +static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state, struct io_kiocb **link) { struct io_ring_ctx *ctx = req->ctx; @@ -3334,7 +3334,7 @@ static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state, err_req: io_cqring_add_event(req, ret); io_double_put_req(req); - return; + return false; } /* @@ -3373,6 +3373,8 @@ err_req: } else { io_queue_sqe(req); } + + return true; } /* @@ -3502,6 +3504,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, } } + submitted++; sqe_flags = req->sqe->flags; req->ring_file = ring_file; @@ -3511,9 +3514,8 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, req->needs_fixed_file = async; trace_io_uring_submit_sqe(ctx, req->sqe->user_data, true, async); - io_submit_sqe(req, statep, &link); - submitted++; - + if (!io_submit_sqe(req, statep, &link)) + break; /* * If previous wasn't linked and we have a linked command, * that's the end of the chain. Submit the previous link. From 4493233edcfc0ad0a7f76f1c83f95b1bcf280547 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 5 Dec 2019 16:16:35 +0300 Subject: [PATCH 30/33] io_uring: hook all linked requests via link_list Links are created by chaining requests through req->list, with the exception that the head uses req->link_list (e.g. link_list->list->list). Because of that, io_req_link_next() needs complex splicing to advance. Link them all through link_list instead. Also, it seems simpler and more consistent IMHO. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- fs/io_uring.c | 42 ++++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 1fca9e2b6e64..c838705c9a5a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -916,7 +916,6 @@ static bool io_link_cancel_timeout(struct io_kiocb *req) static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr) { struct io_ring_ctx *ctx = req->ctx; - struct io_kiocb *nxt; bool wake_ev = false; /* Already got next link */ @@ -928,24 +927,21 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr) * potentially happen if the chain is messed up, check to be on the * safe side.
*/ - nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list); - while (nxt) { - list_del_init(&nxt->list); + while (!list_empty(&req->link_list)) { + struct io_kiocb *nxt = list_first_entry(&req->link_list, + struct io_kiocb, link_list); - if ((req->flags & REQ_F_LINK_TIMEOUT) && - (nxt->flags & REQ_F_TIMEOUT)) { + if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) && + (nxt->flags & REQ_F_TIMEOUT))) { + list_del_init(&nxt->link_list); wake_ev |= io_link_cancel_timeout(nxt); - nxt = list_first_entry_or_null(&req->link_list, - struct io_kiocb, list); req->flags &= ~REQ_F_LINK_TIMEOUT; continue; } - if (!list_empty(&req->link_list)) { - INIT_LIST_HEAD(&nxt->link_list); - list_splice(&req->link_list, &nxt->link_list); - nxt->flags |= REQ_F_LINK; - } + list_del_init(&req->link_list); + if (!list_empty(&nxt->link_list)) + nxt->flags |= REQ_F_LINK; *nxtptr = nxt; break; } @@ -961,15 +957,15 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr) static void io_fail_links(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - struct io_kiocb *link; unsigned long flags; spin_lock_irqsave(&ctx->completion_lock, flags); while (!list_empty(&req->link_list)) { - link = list_first_entry(&req->link_list, struct io_kiocb, list); - list_del_init(&link->list); + struct io_kiocb *link = list_first_entry(&req->link_list, + struct io_kiocb, link_list); + list_del_init(&link->link_list); trace_io_uring_fail_link(req, link); if ((req->flags & REQ_F_LINK_TIMEOUT) && @@ -3170,10 +3166,11 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) * We don't expect the list to be empty, that will only happen if we * race with the completion of the linked work. */ - if (!list_empty(&req->list)) { - prev = list_entry(req->list.prev, struct io_kiocb, link_list); + if (!list_empty(&req->link_list)) { + prev = list_entry(req->link_list.prev, struct io_kiocb, + link_list); if (refcount_inc_not_zero(&prev->refs)) { - list_del_init(&req->list); + list_del_init(&req->link_list); prev->flags &= ~REQ_F_LINK_TIMEOUT; } else prev = NULL; @@ -3203,7 +3200,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req) * we got a chance to setup the timer */ spin_lock_irq(&ctx->completion_lock); - if (!list_empty(&req->list)) { + if (!list_empty(&req->link_list)) { struct io_timeout_data *data = &req->io->timeout; data->timer.function = io_link_timeout_fn; @@ -3223,7 +3220,8 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) if (!(req->flags & REQ_F_LINK)) return NULL; - nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list); + nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, + link_list); if (!nxt || nxt->sqe->opcode != IORING_OP_LINK_TIMEOUT) return NULL; @@ -3364,7 +3362,7 @@ err_req: goto err_req; } trace_io_uring_link(ctx, req, prev); - list_add_tail(&req->list, &prev->link_list); + list_add_tail(&req->link_list, &prev->link_list); } else if (req->sqe->flags & IOSQE_IO_LINK) { req->flags |= REQ_F_LINK; From 08802ed665e47243393f43501bdafa032112eb1c Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Thu, 5 Dec 2019 20:53:11 +0800 Subject: [PATCH 31/33] bfq-iosched: Ensure bio->bi_blkg is valid before using it bio->bi_blkg will be NULL when the issue of the request has bypassed the block layer as shown in the following oops: Internal error: Oops: 96000005 [#1] SMP CPU: 17 PID: 2996 Comm: scsi_id Not tainted 5.4.0 #4 Call trace: percpu_counter_add_batch+0x38/0x4c8 bfqg_stats_update_legacy_io+0x9c/0x280 
bfq_insert_requests+0xbac/0x2190 blk_mq_sched_insert_request+0x288/0x670 blk_execute_rq_nowait+0x140/0x178 blk_execute_rq+0x8c/0x140 sg_io+0x604/0x9c0 scsi_cmd_ioctl+0xe38/0x10a8 scsi_cmd_blk_ioctl+0xac/0xe8 sd_ioctl+0xe4/0x238 blkdev_ioctl+0x590/0x20e0 block_ioctl+0x60/0x98 do_vfs_ioctl+0xe0/0x1b58 ksys_ioctl+0x80/0xd8 __arm64_sys_ioctl+0x40/0x78 el0_svc_handler+0xc4/0x270 So ensure its validity before using it. Fixes: fd41e60331b1 ("bfq-iosched: stop using blkg->stat_bytes and ->stat_ios") Signed-off-by: Hou Tao Signed-off-by: Jens Axboe --- block/bfq-cgroup.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index cea0ae12f937..e1419edde2ec 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -351,6 +351,9 @@ void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq) { struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg); + if (!bfqg) + return; + blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq)); blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1); } From 0b4295b5e2b9b42f3f3096496fe4775b656c9ba6 Mon Sep 17 00:00:00 2001 From: LimingWu <19092205@suning.com> Date: Thu, 5 Dec 2019 20:18:18 +0800 Subject: [PATCH 32/33] io_uring: fix a typo in a comment thatn -> than. Signed-off-by: Liming Wu <19092205@suning.com> Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index c838705c9a5a..405be10da73d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -145,7 +145,7 @@ struct io_rings { /* * Number of completion events lost because the queue was full; * this should be avoided by the application by making sure - * there are not more requests pending thatn there is space in + * there are not more requests pending than there is space in * the completion queue. * * Written by the kernel, shouldn't be modified by the From ece841abbed2da71fa10710c687c9ce9efb6bf69 Mon Sep 17 00:00:00 2001 From: Justin Tee Date: Thu, 5 Dec 2019 10:09:01 +0800 Subject: [PATCH 33/33] block: fix memleak of bio integrity data 7c20f11680a4 ("bio-integrity: stop abusing bi_end_io") moves bio_integrity_free from bio_uninit() to bio_integrity_verify_fn() and bio_endio(). This is wrong because a bio may be freed without bio_endio() being called; for example, blk_rq_unprep_clone() is called from dm_mq_queue_rq() when the underlying queue of dm-mpath is busy. So commit 7c20f11680a4 causes a memory leak of the bio integrity data. Fix this issue by re-adding bio_integrity_free() to bio_uninit(). Fixes: 7c20f11680a4 ("bio-integrity: stop abusing bi_end_io") Reviewed-by: Christoph Hellwig Signed-off-by: Justin Tee Add commit log, and simplify/fix the original patch written by Justin. Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/bio-integrity.c | 2 +- block/bio.c | 3 +++ block/blk.h | 4 ++++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/block/bio-integrity.c b/block/bio-integrity.c index fb95dbb21dd8..bf62c25cde8f 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -87,7 +87,7 @@ EXPORT_SYMBOL(bio_integrity_alloc); * Description: Used to free the integrity portion of a bio. Usually * called from bio_free().
*/ -static void bio_integrity_free(struct bio *bio) +void bio_integrity_free(struct bio *bio) { struct bio_integrity_payload *bip = bio_integrity(bio); struct bio_set *bs = bio->bi_pool; diff --git a/block/bio.c b/block/bio.c index b1170ec18464..9d54aa37ce6c 100644 --- a/block/bio.c +++ b/block/bio.c @@ -233,6 +233,9 @@ fallback: void bio_uninit(struct bio *bio) { bio_disassociate_blkg(bio); + + if (bio_integrity(bio)) + bio_integrity_free(bio); } EXPORT_SYMBOL(bio_uninit); diff --git a/block/blk.h b/block/blk.h index 2bea40180b6f..6842f28c033e 100644 --- a/block/blk.h +++ b/block/blk.h @@ -121,6 +121,7 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio, #ifdef CONFIG_BLK_DEV_INTEGRITY void blk_flush_integrity(void); bool __bio_integrity_endio(struct bio *); +void bio_integrity_free(struct bio *bio); static inline bool bio_integrity_endio(struct bio *bio) { if (bio_integrity(bio)) @@ -166,6 +167,9 @@ static inline bool bio_integrity_endio(struct bio *bio) { return true; } +static inline void bio_integrity_free(struct bio *bio) +{ +} #endif /* CONFIG_BLK_DEV_INTEGRITY */ unsigned long blk_rq_timeout(unsigned long timeout);
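Stepping back, the pattern restored by this last patch generalizes: an attachment released only on a completion path leaks whenever teardown skips completion. A minimal sketch in plain C (illustrative only, not kernel code; names are hypothetical):

#include <stdlib.h>

struct payload {
    void *data;
};

struct object {
    struct payload *integrity; /* optional attachment, may be NULL */
};

/* Completion path: not guaranteed to run (an object can be freed early). */
static void object_endio(struct object *obj)
{
    /* notify waiters etc. -- but do not free the attachment here */
    (void)obj;
}

/* Teardown path: runs for every object, so release attachments here. */
static void object_uninit(struct object *obj)
{
    if (obj->integrity) {
        free(obj->integrity->data);
        free(obj->integrity);
        obj->integrity = NULL;
    }
}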