io_wq: add per-wq work handler instead of per work
io_uring is the only user of io-wq, and now it uses only one io-wq callback for all of its requests, namely io_wq_submit_work(). Instead of storing a per-work ->func callback in each instance of io_wq_work, keep it in io-wq itself.

pros:
- reduces io_wq_work size
- more robust: ->func can no longer be invalidated by a mem{cpy,set}(req)
- helps further work

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
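As a minimal sketch of the resulting model: a hypothetical io-wq consumer now registers its handlers once per workqueue through io_wq_data instead of stashing a function pointer in every io_wq_work. All my_* names, handle_request() and struct my_req below are placeholders for illustration; io_uring itself passes io_wq_submit_work() and io_free_work(), as the fs/io_uring.c hunks further down show.

/* Sketch only: wiring up the new per-wq callbacks (placeholder names). */
struct my_req {
	struct io_wq_work	work;
	/* ... request state ... */
};

static void my_handle_work(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;

	handle_request(container_of(work, struct my_req, work));
}

static void my_free_work(struct io_wq_work *work)
{
	kfree(container_of(work, struct my_req, work));
}

static struct io_wq *my_create_wq(struct user_struct *user, unsigned bounded)
{
	struct io_wq_data data = {
		.user		= user,
		.do_work	= my_handle_work,	/* one handler for the whole wq */
		.free_work	= my_free_work,
	};

	/* io_wq_create() now refuses a missing do_work as well as free_work */
	return io_wq_create(bounded, &data);
}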
commit f5fa38c59c (parent d4c81f3852)
Changed files: fs/io-wq.c (10 lines changed), fs/io-wq.h, fs/io_uring.c
diff --git a/fs/io-wq.c b/fs/io-wq.c
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -112,6 +112,7 @@ struct io_wq {
 	unsigned long state;
 
 	free_work_fn *free_work;
+	io_wq_work_fn *do_work;
 
 	struct task_struct *manager;
 	struct user_struct *user;
@@ -528,7 +529,7 @@ get_next:
 
 			hash = io_get_work_hash(work);
 			linked = old_work = work;
-			linked->func(&linked);
+			wq->do_work(&linked);
 			linked = (old_work == linked) ? NULL : linked;
 
 			work = next_hashed;
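The double-pointer signature matters here: the handler may replace *workptr with a dependent (linked) work item, and the loop above uses the old_work == linked comparison to decide whether anything is left to run. Below is a minimal sketch of a handler that hands back linked work; get_next_linked() and handle_request() are hypothetical helpers, not part of this patch.

static void my_chaining_handler(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;
	struct io_wq_work *next = get_next_linked(work);	/* hypothetical */

	handle_request(work);

	if (next)
		*workptr = next;	/* worker keeps going with the linked work */
	/* otherwise *workptr is unchanged and the caller stops the chain */
}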
@@ -785,7 +786,7 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
 		struct io_wq_work *old_work = work;
 
 		work->flags |= IO_WQ_WORK_CANCEL;
-		work->func(&work);
+		wq->do_work(&work);
 		work = (work == old_work) ? NULL : work;
 		wq->free_work(old_work);
 	} while (work);
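io_run_cancel() still relies on the per-wq handler to honour the cancel flag it sets before calling wq->do_work(). io_uring's io_wq_submit_work() follows roughly this shape, failing the request instead of executing it; the sketch below uses placeholder helpers (complete_cancelled(), handle_request()) for those steps.

static void my_cancel_aware_handler(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;

	if (work->flags & IO_WQ_WORK_CANCEL) {
		complete_cancelled(work);	/* e.g. complete with -ECANCELED */
		return;
	}

	handle_request(work);
}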
@@ -1023,7 +1024,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 	int ret = -ENOMEM, node;
 	struct io_wq *wq;
 
-	if (WARN_ON_ONCE(!data->free_work))
+	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
 		return ERR_PTR(-EINVAL);
 
 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
@@ -1037,6 +1038,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 	}
 
 	wq->free_work = data->free_work;
+	wq->do_work = data->do_work;
 
 	/* caller must already hold a reference to this */
 	wq->user = data->user;
@@ -1093,7 +1095,7 @@ err:
 
 bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
 {
-	if (data->free_work != wq->free_work)
+	if (data->free_work != wq->free_work || data->do_work != wq->do_work)
 		return false;
 
 	return refcount_inc_not_zero(&wq->use_refs);
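Since the callbacks are now a property of the workqueue itself, two rings can only share a wq if both callbacks match, and io_wq_get() enforces that. A sketch of an attach-or-create pattern built on this check (io_uring uses io_wq_get() on the attach path for IORING_SETUP_ATTACH_WQ); existing_wq is a placeholder for a wq borrowed from another ring.

static struct io_wq *my_attach_or_create(struct io_wq *existing_wq,
					 struct io_wq_data *data,
					 unsigned bounded)
{
	if (existing_wq && io_wq_get(existing_wq, data))
		return existing_wq;		/* compatible: ref taken, share it */

	return io_wq_create(bounded, data);	/* otherwise build a private wq */
}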
diff --git a/fs/io-wq.h b/fs/io-wq.h
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -85,7 +85,6 @@ static inline void wq_list_del(struct io_wq_work_list *list,
 
 struct io_wq_work {
 	struct io_wq_work_node list;
-	void (*func)(struct io_wq_work **);
 	struct files_struct *files;
 	struct mm_struct *mm;
 	const struct cred *creds;
@@ -94,9 +93,9 @@ struct io_wq_work {
 	pid_t task_pid;
 };
 
-#define INIT_IO_WORK(work, _func)				\
+#define INIT_IO_WORK(work)					\
 	do {							\
-		*(work) = (struct io_wq_work){ .func = _func };	\
+		*(work) = (struct io_wq_work){};		\
 	} while (0)						\
 
 static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
@@ -108,10 +107,12 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
 }
 
 typedef void (free_work_fn)(struct io_wq_work *);
+typedef void (io_wq_work_fn)(struct io_wq_work **);
 
 struct io_wq_data {
 	struct user_struct *user;
 
+	io_wq_work_fn *do_work;
 	free_work_fn *free_work;
 };
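Note the asymmetry the typedefs encode: do_work receives io_wq_work ** so the handler can swap in linked work (see the get_next hunk above), while free_work only receives the finished item. A brief sketch with placeholder names; io_uring supplies io_wq_submit_work() and io_free_work(), as the fs/io_uring.c hunks below show.

static void my_handler(struct io_wq_work **workptr)
{
	/* may replace *workptr to chain into dependent work */
}

static void my_free(struct io_wq_work *work)
{
	/* release the request that embeds 'work' */
}

static struct io_wq_data my_data = {
	.do_work	= my_handler,	/* must match io_wq_work_fn */
	.free_work	= my_free,	/* must match free_work_fn */
};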
diff --git a/fs/io_uring.c b/fs/io_uring.c
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5776,7 +5776,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	refcount_set(&req->refs, 2);
 	req->task = NULL;
 	req->result = 0;
-	INIT_IO_WORK(&req->work, io_wq_submit_work);
+	INIT_IO_WORK(&req->work);
 
 	if (unlikely(req->opcode >= IORING_OP_LAST))
 		return -EINVAL;
@@ -6796,6 +6796,7 @@ static int io_init_wq_offload(struct io_ring_ctx *ctx,
 
 	data.user = ctx->user;
 	data.free_work = io_free_work;
+	data.do_work = io_wq_submit_work;
 
 	if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
 		/* Do QD, or 4 * CPUS, whatever is smallest */