io_uring: simplify io_timeout locking

Move spin_lock_irq() earlier so that io_timeout() has only one call
site of it. This makes the flow easier to follow.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov, 2020-05-26 20:34:03 +03:00, committed by Jens Axboe
parent 4518a3cc27
commit 733f5c95e6
1 changed file with 1 addition and 2 deletions


@@ -4845,6 +4845,7 @@ static int io_timeout(struct io_kiocb *req)
 	u32 seq = req->sequence;
 
 	data = &req->io->timeout;
+	spin_lock_irq(&ctx->completion_lock);
 
 	/*
 	 * sqe->off holds how many events that need to occur for this
@@ -4853,7 +4854,6 @@ static int io_timeout(struct io_kiocb *req)
 	 */
 	if (!count) {
 		req->flags |= REQ_F_TIMEOUT_NOSEQ;
-		spin_lock_irq(&ctx->completion_lock);
 		entry = ctx->timeout_list.prev;
 		goto add;
 	}
@@ -4864,7 +4864,6 @@ static int io_timeout(struct io_kiocb *req)
 	 * Insertion sort, ensuring the first entry in the list is always
 	 * the one we need first.
 	 */
-	spin_lock_irq(&ctx->completion_lock);
 	list_for_each_prev(entry, &ctx->timeout_list) {
 		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
 		unsigned nxt_seq;
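
For illustration only, a minimal userspace sketch of the before/after locking
pattern the commit message describes. It uses a pthread mutex in place of the
completion_lock spinlock, and the names (my_ctx, add_timeout_before,
add_timeout_after) are made up for this example; this is not the io_uring code.

#include <pthread.h>
#include <stdio.h>

struct my_ctx {
	pthread_mutex_t lock;		/* stand-in for ctx->completion_lock */
	/* ... the timeout list would live here ... */
};

/* Before: each branch takes the lock itself (two call sites). */
static void add_timeout_before(struct my_ctx *ctx, int count)
{
	if (!count) {
		pthread_mutex_lock(&ctx->lock);		/* call site 1 */
		/* pure timeout: append at the tail */
		goto add;
	}

	pthread_mutex_lock(&ctx->lock);			/* call site 2 */
	/* sequenced timeout: insertion-sort into the list */
add:
	/* arm the timer, then drop the lock */
	pthread_mutex_unlock(&ctx->lock);
}

/* After: a single call site, taken before the branches diverge. */
static void add_timeout_after(struct my_ctx *ctx, int count)
{
	pthread_mutex_lock(&ctx->lock);			/* the only call site */
	if (!count) {
		/* pure timeout: append at the tail */
		goto add;
	}
	/* sequenced timeout: insertion-sort into the list */
add:
	/* arm the timer, then drop the lock */
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	struct my_ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };

	add_timeout_before(&ctx, 0);
	add_timeout_after(&ctx, 1);
	printf("both variants end with the lock released\n");
	return 0;
}

Taking the lock once before the branches diverge removes the duplicated call
sites, so a future branch added to the function cannot forget to take the lock.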