io_uring: fix race with canceling timeouts

If we get -1 from hrtimer_try_to_cancel(), we know that the timer
is running. Hence leave all completion to the timeout handler. If
we don't, we can corrupt the list and miss a completion.

Fixes: 11365043e5 ("io_uring: add support for canceling timeout requests")
Reported-by: Hrvoje Zeba <zeba.hrvoje@gmail.com>
Tested-by: Hrvoje Zeba <zeba.hrvoje@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 842f96124c
parent 65e19f54d2
Author: Jens Axboe <axboe@kernel.dk>
Date:   2019-10-29 12:34:10 -06:00

 fs/io_uring.c | 20 ++++++++------------
 1 file changed, 8 insertions(+), 12 deletions(-)

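Note: the fix hinges on hrtimer_try_to_cancel()'s return contract: 0 means
the timer was not queued, 1 means it was queued and is now canceled, and -1
means the callback is executing right now and cannot be stopped. Below is a
commented sketch of the patched cancel path (the comments are annotations,
not part of the patch):

static void io_kill_timeout(struct io_kiocb *req)
{
	int ret;

	ret = hrtimer_try_to_cancel(&req->timeout.timer);
	if (ret != -1) {
		/* We won the race: the timer cannot fire, complete here. */
		atomic_inc(&req->ctx->cq_timeouts);
		list_del_init(&req->list);	/* leaves list_empty(&req->list) true */
		io_cqring_fill_event(req->ctx, req->user_data, 0);
		__io_free_req(req);
	}
	/*
	 * ret == -1: io_timeout_fn() owns the request now. Unlinking or
	 * freeing it here is exactly the list corruption / missed
	 * completion the patch description refers to.
	 */
}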

diff --git a/fs/io_uring.c b/fs/io_uring.c
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -533,7 +533,7 @@ static void io_kill_timeout(struct io_kiocb *req)
 	ret = hrtimer_try_to_cancel(&req->timeout.timer);
 	if (ret != -1) {
 		atomic_inc(&req->ctx->cq_timeouts);
-		list_del(&req->list);
+		list_del_init(&req->list);
 		io_cqring_fill_event(req->ctx, req->user_data, 0);
 		__io_free_req(req);
 	}
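Note: the list_del() -> list_del_init() change matters because io_timeout_fn()
uses list_empty(&req->list) to decide who completes the request. Plain
list_del() poisons the entry's pointers, so a later list_empty() on that entry
returns false; list_del_init() re-links the node to itself, so list_empty()
reliably reports "no longer queued". A minimal sketch of the <linux/list.h>
semantics, using a throwaway demo() function:

static void demo(void)
{
	LIST_HEAD(timeouts);
	struct list_head node;

	list_add(&node, &timeouts);
	list_del(&node);
	/* node.next == LIST_POISON1: list_empty(&node) is false, and the
	 * poisoned pointers must never be followed. */

	list_add(&node, &timeouts);	/* list_add() rewrites node's links */
	list_del_init(&node);
	/* node points at itself again: list_empty(&node) is true. */
}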
@@ -1957,7 +1957,6 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	struct io_ring_ctx *ctx;
 	struct io_kiocb *req;
 	unsigned long flags;
-	bool comp;
 
 	req = container_of(timer, struct io_kiocb, timeout.timer);
 	ctx = req->ctx;
@@ -1968,8 +1967,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	 * We could be racing with timeout deletion. If the list is empty,
 	 * then timeout lookup already found it and will be handling it.
 	 */
-	comp = !list_empty(&req->list);
-	if (comp) {
+	if (!list_empty(&req->list)) {
 		struct io_kiocb *prev;
 
 		/*
@@ -1981,17 +1979,15 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 		prev = req;
 		list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
 			prev->sequence++;
-
 		list_del_init(&req->list);
-		io_cqring_fill_event(ctx, req->user_data, -ETIME);
-		io_commit_cqring(ctx);
 	}
+
+	io_cqring_fill_event(ctx, req->user_data, -ETIME);
+	io_commit_cqring(ctx);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-	if (comp) {
-		io_cqring_ev_posted(ctx);
-		io_put_req(req, NULL);
-	}
+	io_cqring_ev_posted(ctx);
+	io_put_req(req, NULL);
 	return HRTIMER_NORESTART;
 }
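Note: in the patched handler the -ETIME completion and the final io_put_req()
run unconditionally; the list_empty() check only decides whether the sequence
bookkeeping and the unlink are still needed. Since a canceler that gets -1
from hrtimer_try_to_cancel() now backs off entirely, the handler posting
exactly once is what prevents the missed completion described above.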
@@ -2131,9 +2127,9 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	}
 	req->sequence -= span;
 	list_add(&req->list, entry);
-	spin_unlock_irq(&ctx->completion_lock);
 
 	req->timeout.timer.function = io_timeout_fn;
 	hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts), mode);
+	spin_unlock_irq(&ctx->completion_lock);
 	return 0;
 }
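Note: the last hunk plausibly closes the setup side of the same race. Cancel
paths take completion_lock before walking the timeout list, so arming the
timer before dropping the lock means no canceler can observe a queued request
whose timer has not been started yet (a state where hrtimer_try_to_cancel()
would return 0 and the canceler would free a request that io_timeout() is
still setting up).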