io_uring: drop the old style inflight file tracking
io_uring tracks requests that are referencing an io_uring descriptor to
be able to cancel without worrying about loops in the references. Since
we now assign the file at execution time, the easier approach is to drop
a potentially problematic reference before we punt the request. This
eliminates the need to special case these types of files beyond just
marking them as such, and simplifies cancelation quite a bit.
This also fixes a recent issue where an async punted tee operation
with the io_uring descriptor as the output file would crash when
attempting to get a reference to the file from the io-wq worker. We
could have worked around that, but this is the much cleaner fix.
Fixes: 6bf9c47a39 ("io_uring: defer file assignment")
Reported-by: syzbot+c4b9303500a21750b250@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
6bf9c47a39
commit
d5361233e9
|
@ -112,8 +112,7 @@
|
||||||
IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
|
IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
|
||||||
|
|
||||||
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
|
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
|
||||||
REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
|
REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)
|
||||||
REQ_F_ASYNC_DATA)
|
|
||||||
|
|
||||||
#define IO_TCTX_REFS_CACHE_NR (1U << 10)
|
#define IO_TCTX_REFS_CACHE_NR (1U << 10)
|
||||||
|
|
||||||
|
@ -500,7 +499,6 @@ struct io_uring_task {
|
||||||
const struct io_ring_ctx *last;
|
const struct io_ring_ctx *last;
|
||||||
struct io_wq *io_wq;
|
struct io_wq *io_wq;
|
||||||
struct percpu_counter inflight;
|
struct percpu_counter inflight;
|
||||||
atomic_t inflight_tracked;
|
|
||||||
atomic_t in_idle;
|
atomic_t in_idle;
|
||||||
|
|
||||||
spinlock_t task_lock;
|
spinlock_t task_lock;
|
||||||
|
@ -1186,6 +1184,8 @@ static void io_clean_op(struct io_kiocb *req);
|
||||||
static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
|
static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
|
||||||
unsigned issue_flags);
|
unsigned issue_flags);
|
||||||
static inline struct file *io_file_get_normal(struct io_kiocb *req, int fd);
|
static inline struct file *io_file_get_normal(struct io_kiocb *req, int fd);
|
||||||
|
static void io_drop_inflight_file(struct io_kiocb *req);
|
||||||
|
static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
|
||||||
static void __io_queue_sqe(struct io_kiocb *req);
|
static void __io_queue_sqe(struct io_kiocb *req);
|
||||||
static void io_rsrc_put_work(struct work_struct *work);
|
static void io_rsrc_put_work(struct work_struct *work);
|
||||||
|
|
||||||
|
@ -1433,29 +1433,9 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
|
||||||
bool cancel_all)
|
bool cancel_all)
|
||||||
__must_hold(&req->ctx->timeout_lock)
|
__must_hold(&req->ctx->timeout_lock)
|
||||||
{
|
{
|
||||||
struct io_kiocb *req;
|
|
||||||
|
|
||||||
if (task && head->task != task)
|
if (task && head->task != task)
|
||||||
return false;
|
return false;
|
||||||
if (cancel_all)
|
return cancel_all;
|
||||||
return true;
|
|
||||||
|
|
||||||
io_for_each_link(req, head) {
|
|
||||||
if (req->flags & REQ_F_INFLIGHT)
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool io_match_linked(struct io_kiocb *head)
|
|
||||||
{
|
|
||||||
struct io_kiocb *req;
|
|
||||||
|
|
||||||
io_for_each_link(req, head) {
|
|
||||||
if (req->flags & REQ_F_INFLIGHT)
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1465,24 +1445,9 @@ static bool io_match_linked(struct io_kiocb *head)
|
||||||
static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
|
static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
|
||||||
bool cancel_all)
|
bool cancel_all)
|
||||||
{
|
{
|
||||||
bool matched;
|
|
||||||
|
|
||||||
if (task && head->task != task)
|
if (task && head->task != task)
|
||||||
return false;
|
return false;
|
||||||
if (cancel_all)
|
return cancel_all;
|
||||||
return true;
|
|
||||||
|
|
||||||
if (head->flags & REQ_F_LINK_TIMEOUT) {
|
|
||||||
struct io_ring_ctx *ctx = head->ctx;
|
|
||||||
|
|
||||||
/* protect against races with linked timeouts */
|
|
||||||
spin_lock_irq(&ctx->timeout_lock);
|
|
||||||
matched = io_match_linked(head);
|
|
||||||
spin_unlock_irq(&ctx->timeout_lock);
|
|
||||||
} else {
|
|
||||||
matched = io_match_linked(head);
|
|
||||||
}
|
|
||||||
return matched;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool req_has_async_data(struct io_kiocb *req)
|
static inline bool req_has_async_data(struct io_kiocb *req)
|
||||||
|
@ -1645,14 +1610,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
|
||||||
return req->flags & REQ_F_FIXED_FILE;
|
return req->flags & REQ_F_FIXED_FILE;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void io_req_track_inflight(struct io_kiocb *req)
|
|
||||||
{
|
|
||||||
if (!(req->flags & REQ_F_INFLIGHT)) {
|
|
||||||
req->flags |= REQ_F_INFLIGHT;
|
|
||||||
atomic_inc(&current->io_uring->inflight_tracked);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
|
static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
|
||||||
{
|
{
|
||||||
if (WARN_ON_ONCE(!req->link))
|
if (WARN_ON_ONCE(!req->link))
|
||||||
|
@ -2563,6 +2520,8 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority)
|
||||||
|
|
||||||
WARN_ON_ONCE(!tctx);
|
WARN_ON_ONCE(!tctx);
|
||||||
|
|
||||||
|
io_drop_inflight_file(req);
|
||||||
|
|
||||||
spin_lock_irqsave(&tctx->task_lock, flags);
|
spin_lock_irqsave(&tctx->task_lock, flags);
|
||||||
if (priority)
|
if (priority)
|
||||||
wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
|
wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
|
||||||
|
@ -6008,6 +5967,9 @@ static int io_poll_check_events(struct io_kiocb *req, bool locked)
|
||||||
if (!req->result) {
|
if (!req->result) {
|
||||||
struct poll_table_struct pt = { ._key = req->cflags };
|
struct poll_table_struct pt = { ._key = req->cflags };
|
||||||
|
|
||||||
|
if (unlikely(!io_assign_file(req, IO_URING_F_UNLOCKED)))
|
||||||
|
req->result = -EBADF;
|
||||||
|
else
|
||||||
req->result = vfs_poll(req->file, &pt) & req->cflags;
|
req->result = vfs_poll(req->file, &pt) & req->cflags;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -7226,11 +7188,6 @@ static void io_clean_op(struct io_kiocb *req)
|
||||||
kfree(req->apoll);
|
kfree(req->apoll);
|
||||||
req->apoll = NULL;
|
req->apoll = NULL;
|
||||||
}
|
}
|
||||||
if (req->flags & REQ_F_INFLIGHT) {
|
|
||||||
struct io_uring_task *tctx = req->task->io_uring;
|
|
||||||
|
|
||||||
atomic_dec(&tctx->inflight_tracked);
|
|
||||||
}
|
|
||||||
if (req->flags & REQ_F_CREDS)
|
if (req->flags & REQ_F_CREDS)
|
||||||
put_cred(req->creds);
|
put_cred(req->creds);
|
||||||
if (req->flags & REQ_F_ASYNC_DATA) {
|
if (req->flags & REQ_F_ASYNC_DATA) {
|
||||||
|
@ -7522,6 +7479,19 @@ out:
|
||||||
return file;
|
return file;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Drop the file for requeue operations. Only used if req->file is the
|
||||||
|
* io_uring descriptor itself.
|
||||||
|
*/
|
||||||
|
static void io_drop_inflight_file(struct io_kiocb *req)
|
||||||
|
{
|
||||||
|
if (unlikely(req->flags & REQ_F_INFLIGHT)) {
|
||||||
|
fput(req->file);
|
||||||
|
req->file = NULL;
|
||||||
|
req->flags &= ~REQ_F_INFLIGHT;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
|
static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
|
||||||
{
|
{
|
||||||
struct file *file = fget(fd);
|
struct file *file = fget(fd);
|
||||||
|
@ -7529,8 +7499,8 @@ static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
|
||||||
trace_io_uring_file_get(req->ctx, req, req->user_data, fd);
|
trace_io_uring_file_get(req->ctx, req, req->user_data, fd);
|
||||||
|
|
||||||
/* we don't allow fixed io_uring files */
|
/* we don't allow fixed io_uring files */
|
||||||
if (file && unlikely(file->f_op == &io_uring_fops))
|
if (file && file->f_op == &io_uring_fops)
|
||||||
io_req_track_inflight(req);
|
req->flags |= REQ_F_INFLIGHT;
|
||||||
return file;
|
return file;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -9437,7 +9407,6 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
|
||||||
xa_init(&tctx->xa);
|
xa_init(&tctx->xa);
|
||||||
init_waitqueue_head(&tctx->wait);
|
init_waitqueue_head(&tctx->wait);
|
||||||
atomic_set(&tctx->in_idle, 0);
|
atomic_set(&tctx->in_idle, 0);
|
||||||
atomic_set(&tctx->inflight_tracked, 0);
|
|
||||||
task->io_uring = tctx;
|
task->io_uring = tctx;
|
||||||
spin_lock_init(&tctx->task_lock);
|
spin_lock_init(&tctx->task_lock);
|
||||||
INIT_WQ_LIST(&tctx->task_list);
|
INIT_WQ_LIST(&tctx->task_list);
|
||||||
|
@ -10630,7 +10599,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
|
||||||
static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
|
static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
|
||||||
{
|
{
|
||||||
if (tracked)
|
if (tracked)
|
||||||
return atomic_read(&tctx->inflight_tracked);
|
return 0;
|
||||||
return percpu_counter_sum(&tctx->inflight);
|
return percpu_counter_sum(&tctx->inflight);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue