io-wq: remove io_wq_flush and IO_WQ_WORK_INTERNAL
io_wq_flush() is buggy: during cancellation of a flush, the associated work may be passed to the caller's (i.e. io_uring's) @match callback. That callback expects the work to be embedded in a struct io_kiocb. Cancellation of internal work probably doesn't make a lot of sense to begin with.

As the flush helper is no longer used, just delete it and the associated work flag.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
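For context, here is a minimal userspace sketch of the type confusion the message describes. The structures and the match_by_user_data() callback are simplified, illustrative stand-ins modeled on io_uring's cancellation matcher, not the real kernel definitions.

/*
 * Illustrative sketch only: simplified stand-ins for the kernel structures,
 * compiled as ordinary userspace C.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct io_wq_work {
	unsigned int flags;
};

/* Regular requests embed their work item inside the request itself. */
struct io_kiocb {
	unsigned long user_data;
	struct io_wq_work work;
};

/* io_wq_flush() embedded its work item in a private on-stack struct instead. */
struct io_wq_flush_data {
	struct io_wq_work work;
	int done;			/* stand-in for struct completion */
};

/*
 * A cancellation @match callback in the io_uring style: it assumes every
 * work item it is handed lives inside a struct io_kiocb.
 */
static bool match_by_user_data(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	return req->user_data == (unsigned long)data;
}

int main(void)
{
	struct io_kiocb req = { .user_data = 0x1234 };
	struct io_wq_flush_data flush = { { 0 }, 0 };

	/* Correct case: the work item really is embedded in an io_kiocb. */
	printf("match on io_kiocb: %d\n",
	       match_by_user_data(&req.work, (void *)0x1234));

	/*
	 * Buggy case: if cancellation raced with a flush, the matcher could be
	 * handed &flush.work; container_of() then rewinds past the start of
	 * 'flush' and yields a pointer to memory that is not an io_kiocb.
	 */
	printf("bogus io_kiocb computed for flush work: %p\n",
	       (void *)container_of(&flush.work, struct io_kiocb, work));
	return 0;
}

With io_wq_flush() and IO_WQ_WORK_INTERNAL gone, every work item io-wq hands back through its hooks was queued by the caller, so the matcher's embedding assumption should always hold.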
parent fc04c39bae
commit 80ad894382

fs/io-wq.c | 38
fs/io-wq.c

@@ -502,7 +502,7 @@ next:
 		if (worker->mm)
 			work->flags |= IO_WQ_WORK_HAS_MM;
 
-		if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL)) {
+		if (wq->get_work) {
 			put_work = work;
 			wq->get_work(work);
 		}
@@ -1057,42 +1057,6 @@ enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
 	return ret;
 }
 
-struct io_wq_flush_data {
-	struct io_wq_work work;
-	struct completion done;
-};
-
-static void io_wq_flush_func(struct io_wq_work **workptr)
-{
-	struct io_wq_work *work = *workptr;
-	struct io_wq_flush_data *data;
-
-	data = container_of(work, struct io_wq_flush_data, work);
-	complete(&data->done);
-}
-
-/*
- * Doesn't wait for previously queued work to finish. When this completes,
- * it just means that previously queued work was started.
- */
-void io_wq_flush(struct io_wq *wq)
-{
-	struct io_wq_flush_data data;
-	int node;
-
-	for_each_node(node) {
-		struct io_wqe *wqe = wq->wqes[node];
-
-		if (!node_online(node))
-			continue;
-		init_completion(&data.done);
-		INIT_IO_WORK(&data.work, io_wq_flush_func);
-		data.work.flags |= IO_WQ_WORK_INTERNAL;
-		io_wqe_enqueue(wqe, &data.work);
-		wait_for_completion(&data.done);
-	}
-}
-
 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 {
 	int ret = -ENOMEM, node;
fs/io-wq.h

@@ -8,7 +8,6 @@ enum {
 	IO_WQ_WORK_HAS_MM	= 2,
 	IO_WQ_WORK_HASHED	= 4,
 	IO_WQ_WORK_UNBOUND	= 32,
-	IO_WQ_WORK_INTERNAL	= 64,
 	IO_WQ_WORK_CB		= 128,
 	IO_WQ_WORK_NO_CANCEL	= 256,
 	IO_WQ_WORK_CONCURRENT	= 512,
@@ -100,7 +99,6 @@ void io_wq_destroy(struct io_wq *wq);
 
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
 void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
-void io_wq_flush(struct io_wq *wq);
 
 void io_wq_cancel_all(struct io_wq *wq);
 enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);