io_uring-5.15-2021-10-22
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmFzfyQQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpiO/D/9cqYHpjGDwyftzQFJFfEy9ny6nlLm6lJef
hsrZjC0S649FnXc0YHVLDH3/nos0XsQUYvVJnAMW9EHB6x/95JRUyxzouVz1Fewp
w8Z+lOKymIf3X1LQoB6KQXH5ayohNtUo6HA0Ye/v+iEG+bq/lo9tCMSshpJs3afq
UWW8RxGhrMHfqfgn/8Kkz8fEqZjXz7tssZ+1AFftTxKbk97ZWPahwjvO+xLFWl/m
NbMkHf3xeAvDL747ccrVBOerRZUPySXZElgkPzdjQ4y5HHZrpxt/ZR9Xu7XRzgkJ
7SEmsJ80vla19u3eW/oAn3T4EEGS3qWlei8T47kKIoT1W52S3rqjwsV/30re16GW
sGMWdFiH/GW3VnOxs0/a4/q70je3E9DicSTs4SALTwnvjQ+vrunWgG6ojtxLcieT
Br+km8nmDPug1wxoH2gQLN/EhGcH5hQvi4ZMiMH8MWalYpEkIADOOvAwp0GDwVoE
6DxWeYs57rdSQnSLxDah+mAqBokqswJ/ZmuBOO/iSqXCImehLs0VL1Y+TsThVbRy
epnBdqLk5PbDpODcYTl7on3MD3hpoHjbpnAPah0py57sroiY73sNE/ms1AUsqYPs
fAe5tjFwhGhVWRiZMGOAG6kgTtSdxG134c0Lyvy6xACTR8rJfgcnWMwFJDWK2GDn
ReGYJcgEOA==
=ywLV
-----END PGP SIGNATURE-----

Merge tag 'io_uring-5.15-2021-10-22' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "Two fixes for the max workers limit API that was introduced this
  series: one fix for an issue with that code, and one fixing a linked
  timeout regression in this series"

* tag 'io_uring-5.15-2021-10-22' of git://git.kernel.dk/linux-block:
  io_uring: apply worker limits to previous users
  io_uring: fix ltimeout unprep
  io_uring: apply max_workers limit to all future users
  io-wq: max_worker fixes
commit da4d34b669
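For reference, the interface these fixes harden is IORING_REGISTER_IOWQ_MAX_WORKERS, which caps the number of bounded and unbounded io-wq workers for a task; with this pull the registered limits are also propagated to tasks already sharing the ring and applied to tasks that attach later. A minimal userspace sketch of the registration call (assuming liburing 2.1+ for the io_uring_register_iowq_max_workers() helper; this example is not part of the commit, error handling abbreviated):

/* set io-wq worker limits on a ring and read back the previous ones */
#include <stdio.h>
#include <liburing.h>

int main(void)
{
        struct io_uring ring;
        /* vals[0] = bounded workers, vals[1] = unbounded; 0 leaves a limit unchanged */
        unsigned int vals[2] = { 4, 8 };

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        /* the kernel writes the previous limits back into vals[] */
        if (io_uring_register_iowq_max_workers(&ring, vals) < 0) {
                io_uring_queue_exit(&ring);
                return 1;
        }
        printf("previous limits: bounded=%u unbounded=%u\n", vals[0], vals[1]);

        io_uring_queue_exit(&ring);
        return 0;
}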
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -253,7 +253,7 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
                 pr_warn_once("io-wq is not configured for unbound workers");
 
         raw_spin_lock(&wqe->lock);
-        if (acct->nr_workers == acct->max_workers) {
+        if (acct->nr_workers >= acct->max_workers) {
                 raw_spin_unlock(&wqe->lock);
                 return true;
         }
@@ -1291,15 +1291,18 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
 
         rcu_read_lock();
         for_each_node(node) {
+                struct io_wqe *wqe = wq->wqes[node];
                 struct io_wqe_acct *acct;
 
+                raw_spin_lock(&wqe->lock);
                 for (i = 0; i < IO_WQ_ACCT_NR; i++) {
-                        acct = &wq->wqes[node]->acct[i];
+                        acct = &wqe->acct[i];
                         prev = max_t(int, acct->max_workers, prev);
                         if (new_count[i])
                                 acct->max_workers = new_count[i];
                         new_count[i] = prev;
                 }
+                raw_spin_unlock(&wqe->lock);
         }
         rcu_read_unlock();
         return 0;
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -456,6 +456,8 @@ struct io_ring_ctx {
                 struct work_struct              exit_work;
                 struct list_head                tctx_list;
                 struct completion               ref_comp;
+                u32                             iowq_limits[2];
+                bool                            iowq_limits_set;
         };
 };
 
@@ -1368,11 +1370,6 @@ static void io_req_track_inflight(struct io_kiocb *req)
         }
 }
 
-static inline void io_unprep_linked_timeout(struct io_kiocb *req)
-{
-        req->flags &= ~REQ_F_LINK_TIMEOUT;
-}
-
 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
         if (WARN_ON_ONCE(!req->link))
@@ -6983,7 +6980,7 @@ issue_sqe:
         switch (io_arm_poll_handler(req)) {
         case IO_APOLL_READY:
                 if (linked_timeout)
-                        io_unprep_linked_timeout(req);
+                        io_queue_linked_timeout(linked_timeout);
                 goto issue_sqe;
         case IO_APOLL_ABORTED:
                 /*
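The hunk above is the ltimeout fix: on IO_APOLL_READY the linked timeout is now queued via io_queue_linked_timeout() instead of being "unprepped". The request pattern involved is an ordinary SQE linked to an IORING_OP_LINK_TIMEOUT. A rough sketch of how such a pair is built from userspace (assuming liburing; submit_read_with_timeout() is an illustrative name, not from this commit):

/* link a read to a 1s LINK_TIMEOUT that cancels it on expiry */
#include <errno.h>
#include <liburing.h>

static int submit_read_with_timeout(struct io_uring *ring, int fd,
                                    void *buf, unsigned int len)
{
        struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        struct io_uring_sqe *sqe;

        /* the read is linked to the SQE that follows it ... */
        sqe = io_uring_get_sqe(ring);
        if (!sqe)
                return -EBUSY;
        io_uring_prep_read(sqe, fd, buf, len, 0);
        sqe->flags |= IOSQE_IO_LINK;

        /* ... which is the linked timeout covering the read */
        sqe = io_uring_get_sqe(ring);
        if (!sqe)
                return -EBUSY;
        io_uring_prep_link_timeout(sqe, &ts, 0);

        return io_uring_submit(ring);
}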
@@ -9638,7 +9635,16 @@ static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
                 ret = io_uring_alloc_task_context(current, ctx);
                 if (unlikely(ret))
                         return ret;
+
                 tctx = current->io_uring;
+                if (ctx->iowq_limits_set) {
+                        unsigned int limits[2] = { ctx->iowq_limits[0],
+                                                   ctx->iowq_limits[1], };
+
+                        ret = io_wq_max_workers(tctx->io_wq, limits);
+                        if (ret)
+                                return ret;
+                }
         }
         if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
                 node = kmalloc(sizeof(*node), GFP_KERNEL);
@@ -10643,7 +10649,9 @@ static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
 
 static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                                         void __user *arg)
+        __must_hold(&ctx->uring_lock)
 {
+        struct io_tctx_node *node;
         struct io_uring_task *tctx = NULL;
         struct io_sq_data *sqd = NULL;
         __u32 new_count[2];
@@ -10674,13 +10682,19 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                 tctx = current->io_uring;
         }
 
-        ret = -EINVAL;
-        if (tctx && tctx->io_wq) {
-                ret = io_wq_max_workers(tctx->io_wq, new_count);
-                if (ret)
-                        goto err;
-        } else {
-                memset(new_count, 0, sizeof(new_count));
-        }
+        ret = -EINVAL;
+        if (!tctx || !tctx->io_wq)
+                goto err;
+        BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
+
+        ret = io_wq_max_workers(tctx->io_wq, new_count);
+        if (ret)
+                goto err;
+        memcpy(ctx->iowq_limits, new_count, sizeof(new_count));
+        ctx->iowq_limits_set = true;
 
         if (sqd) {
                 mutex_unlock(&sqd->lock);
@@ -10690,6 +10704,22 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
         if (copy_to_user(arg, new_count, sizeof(new_count)))
                 return -EFAULT;
 
+        /* that's it for SQPOLL, only the SQPOLL task creates requests */
+        if (sqd)
+                return 0;
+
+        /* now propagate the restriction to all registered users */
+        list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+                struct io_uring_task *tctx = node->task->io_uring;
+
+                if (WARN_ON_ONCE(!tctx->io_wq))
+                        continue;
+
+                for (i = 0; i < ARRAY_SIZE(new_count); i++)
+                        new_count[i] = ctx->iowq_limits[i];
+                /* ignore errors, it always returns zero anyway */
+                (void)io_wq_max_workers(tctx->io_wq, new_count);
+        }
         return 0;
 err:
         if (sqd) {