io_uring: let fast poll support multishot

For operations like accept, multishot is a useful feature: one SQE can
serve many completions, which reduces the number of accept SQEs that
userspace has to submit. Let's integrate it into fast poll; it may be
useful for other operations in the future.

Signed-off-by: Hao Xu <howeyxu@tencent.com>
Link: https://lore.kernel.org/r/20220514142046.58072-4-haoxu.linux@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Hao Xu 2022-05-14 22:20:45 +08:00 committed by Jens Axboe
parent 227685ebfa
commit dbc2564cfe
1 changed file with 32 additions and 15 deletions
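For context, this is the userspace pattern multishot accept enables. A
minimal sketch in C, not part of the patch; it assumes a liburing
version that provides io_uring_prep_multishot_accept():

#include <errno.h>
#include <liburing.h>

/*
 * Arm a single multishot accept. While the request stays armed, each
 * accepted connection posts its own CQE, so there is no need to queue
 * one accept SQE per incoming connection.
 */
static int arm_multishot_accept(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;	/* SQ ring full, caller may retry */
	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	return io_uring_submit(ring);
}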

@@ -6011,6 +6011,7 @@ static void io_poll_remove_entries(struct io_kiocb *req)
 	rcu_read_unlock();
 }
 
+static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags);
 /*
  * All poll tw should go through this. Checks for poll events, manages
  * references, does rewait, etc.
@@ -6019,10 +6020,10 @@ static void io_poll_remove_entries(struct io_kiocb *req)
  * either spurious wakeup or multishot CQE is served. 0 when it's done with
  * the request, then the mask is stored in req->cqe.res.
  */
-static int io_poll_check_events(struct io_kiocb *req, bool locked)
+static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	int v;
+	int v, ret;
 
 	/* req->task == current here, checking PF_EXITING is safe */
 	if (unlikely(req->task->flags & PF_EXITING))
@@ -6046,23 +6047,37 @@ static int io_poll_check_events(struct io_kiocb *req, bool locked)
 			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
 		}
 
-		/* multishot, just fill an CQE and proceed */
-		if (req->cqe.res && !(req->apoll_events & EPOLLONESHOT)) {
-			__poll_t mask = mangle_poll(req->cqe.res & req->apoll_events);
+		if ((unlikely(!req->cqe.res)))
+			continue;
+		if (req->apoll_events & EPOLLONESHOT)
+			return 0;
+
+		/* multishot, just fill a CQE and proceed */
+		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
+			__poll_t mask = mangle_poll(req->cqe.res &
+						    req->apoll_events);
 			bool filled;
 
 			spin_lock(&ctx->completion_lock);
-			filled = io_fill_cqe_aux(ctx, req->cqe.user_data, mask,
-						 IORING_CQE_F_MORE);
+			filled = io_fill_cqe_aux(ctx, req->cqe.user_data,
+						 mask, IORING_CQE_F_MORE);
 			io_commit_cqring(ctx);
 			spin_unlock(&ctx->completion_lock);
-			if (unlikely(!filled))
-				return -ECANCELED;
-			io_cqring_ev_posted(ctx);
-		} else if (req->cqe.res) {
-			return 0;
+			if (filled) {
+				io_cqring_ev_posted(ctx);
+				continue;
+			}
+			return -ECANCELED;
 		}
 
+		io_tw_lock(req->ctx, locked);
+		if (unlikely(req->task->flags & PF_EXITING))
+			return -EFAULT;
+		ret = io_issue_sqe(req,
+				   IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
+		if (ret)
+			return ret;
+
 		/*
 		 * Release all references, retry if someone tried to restart
 		 * task_work while we were executing it.
@@ -6077,7 +6092,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
-	ret = io_poll_check_events(req, *locked);
+	ret = io_poll_check_events(req, locked);
 	if (ret > 0)
 		return;
 
@@ -6102,7 +6117,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
-	ret = io_poll_check_events(req, *locked);
+	ret = io_poll_check_events(req, locked);
 	if (ret > 0)
 		return;
 
@@ -6343,7 +6358,7 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct async_poll *apoll;
 	struct io_poll_table ipt;
-	__poll_t mask = IO_ASYNC_POLL_COMMON | POLLERR;
+	__poll_t mask = POLLPRI | POLLERR;
 	int ret;
 
 	if (!def->pollin && !def->pollout)
@@ -6352,6 +6367,8 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 		return IO_APOLL_ABORTED;
 	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
 		return IO_APOLL_ABORTED;
+	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
+		mask |= EPOLLONESHOT;
 
 	if (def->pollin) {
 		mask |= POLLIN | POLLRDNORM;
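
On the completion side, IORING_CQE_F_MORE (the flag attached to the
multishot CQEs above) is how userspace learns whether a multishot
request is still armed. A sketch of the reaping loop, again assuming
liburing; handle_new_connection() is a hypothetical application
callback, not a liburing API:

#include <stdbool.h>
#include <liburing.h>

void handle_new_connection(int fd);	/* hypothetical application hook */

static void reap_accepts(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;

	while (io_uring_wait_cqe(ring, &cqe) == 0) {
		bool more = cqe->flags & IORING_CQE_F_MORE;

		/* cqe->res is the accepted fd, or -errno on failure */
		if (cqe->res >= 0)
			handle_new_connection(cqe->res);
		io_uring_cqe_seen(ring, cqe);
		if (!more)
			break;	/* request terminated: re-arm the accept */
	}
}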