io_uring: split poll and poll_remove structs

Don't use a single struct for polls and poll remove requests, they have
totally different layouts.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2020-10-27 23:17:18 +00:00 committed by Jens Axboe
parent 14a1143b68
commit 018043be1f
1 changed files with 9 additions and 8 deletions

View File

@@ -395,16 +395,18 @@ struct io_ring_ctx {
  */
 struct io_poll_iocb {
 	struct file			*file;
-	union {
-		struct wait_queue_head	*head;
-		u64			addr;
-	};
+	struct wait_queue_head		*head;
 	__poll_t			events;
 	bool				done;
 	bool				canceled;
 	struct wait_queue_entry		wait;
 };
 
+struct io_poll_remove {
+	struct file			*file;
+	u64				addr;
+};
+
 struct io_close {
 	struct file			*file;
 	struct file			*put_file;
@@ -672,6 +674,7 @@ struct io_kiocb {
 		struct file		*file;
 		struct io_rw		rw;
 		struct io_poll_iocb	poll;
+		struct io_poll_remove	poll_remove;
 		struct io_accept	accept;
 		struct io_sync		sync;
 		struct io_cancel	cancel;
@@ -5538,7 +5541,7 @@ static int io_poll_remove_prep(struct io_kiocb *req,
 	    sqe->poll_events)
 		return -EINVAL;
 
-	req->poll.addr = READ_ONCE(sqe->addr);
+	req->poll_remove.addr = READ_ONCE(sqe->addr);
 	return 0;
 }
@@ -5549,12 +5552,10 @@ static int io_poll_remove_prep(struct io_kiocb *req,
 static int io_poll_remove(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	u64 addr;
 	int ret;
 
-	addr = req->poll.addr;
 	spin_lock_irq(&ctx->completion_lock);
-	ret = io_poll_cancel(ctx, addr);
+	ret = io_poll_cancel(ctx, req->poll_remove.addr);
 	spin_unlock_irq(&ctx->completion_lock);
 
 	if (ret < 0)