io_uring-5.11-2021-01-01
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl/vOCwQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpqA7D/9AyFvg16KEgfYCN2OYXU5jyphu7sCCb8Cx
PJ4H+Lf7fWki+/yFdXLxQnuBMGEOYWqtEIPN9CnO/I1ixzOoNugxiFAyerhd/Noh
COg2EUsUrWq/zobYP60wN9pBPnW6EHTnFVA02kMVKunm4d5O5DZWPXy5BwA9yU3u
dE9LoYDjFiaahogi3x+EmYStexxT0FB0d5WTONA7qSFrskeNbyVaYy8mY09jPynG
IbG41fv2n0Zwlcx4XDCebsZ1+08rAGZFhwiq8VBhPNiz7sOud9jW7rRFHXR2FVoo
DsW2npiYHVvOYqkl1HjXw5Mo6p8UKrDEDAIS7OOAHXM9Lz2/YGS9h9ogROccBta2
5er12VaahIEiH05KtxpGv/q+vyJK7Gdqg0jSuSzKHSdSpTS10Ejh82Xo2V6lRedb
gP03ZiDZjLtvh8F5hrWTJqPTtnFDRkY/I7R3WP1Ga7mqajFhpFDMvjvyEMMBCz+K
KGjMfahNo2nzc9nu5M1VjX42tz5VxKjA3N2netxBfDMVB/GpGcQ7xygS85wx7VPn
UUChgqw0aJrrq5slOZEAVqSsBN/wN97+m6uLLdk025CzQngwiw5fkTooakPxnGee
bW9WKMpWBj/ipPXvU5C1tvHk4gxMg+cmxcr6EZ3uaWfE+MC7Xk9c00lNF62CT0Xm
e+0RWRV1ig==
=XYT5
-----END PGP SIGNATURE-----

Merge tag 'io_uring-5.11-2021-01-01' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "A few fixes that should go into 5.11, all marked for stable as well:

  - Fix issue around identity COW'ing and users that share a ring
    across processes

  - Fix a hang associated with unregistering fixed files (Pavel)

  - Move the 'process is exiting' cancelation a bit earlier, so
    task_works aren't affected by it (Pavel)"

* tag 'io_uring-5.11-2021-01-01' of git://git.kernel.dk/linux-block:
  kernel/io_uring: cancel io_uring before task works
  io_uring: fix io_sqe_files_unregister() hangs
  io_uring: add a helper for setting a ref node
  io_uring: don't assume mm is constant across submits
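The first item ("users that share a ring across processes") refers to rings set up in one process and submitted to from another. Purely as an illustrative sketch, not part of this merge and with error handling trimmed, the liburing pattern in question looks roughly like this:

/*
 * Illustrative sketch, not from this merge: the parent creates the ring,
 * a forked child submits a request from its own mm, and the parent reaps
 * the completion.  This is the cross-process submit pattern the
 * identity/mm fix in this pull is about.
 */
#include <liburing.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	if (fork() == 0) {
		/* child: submit from a task/mm other than the ring creator */
		struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

		io_uring_prep_nop(sqe);
		io_uring_submit(&ring);
		_exit(0);
	}

	wait(NULL);
	/* parent: reap the completion the child queued */
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("nop cqe res %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}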
commit dc3e24b214
fs/file.c

@@ -21,7 +21,6 @@
 #include <linux/rcupdate.h>
 #include <linux/close_range.h>
 #include <net/sock.h>
-#include <linux/io_uring.h>
 
 unsigned int sysctl_nr_open __read_mostly = 1024*1024;
 unsigned int sysctl_nr_open_min = BITS_PER_LONG;
@@ -428,7 +427,6 @@ void exit_files(struct task_struct *tsk)
 	struct files_struct * files = tsk->files;
 
 	if (files) {
-		io_uring_files_cancel(files);
 		task_lock(tsk);
 		tsk->files = NULL;
 		task_unlock(tsk);
fs/io_uring.c

@@ -992,6 +992,10 @@ enum io_mem_account {
 	ACCT_PINNED,
 };
 
+static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
+static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
+			struct io_ring_ctx *ctx);
+
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 			     struct io_comp_state *cs);
 static void io_cqring_fill_event(struct io_kiocb *req, long res);
@@ -1501,6 +1505,13 @@ static bool io_grab_identity(struct io_kiocb *req)
 		spin_unlock_irq(&ctx->inflight_lock);
 		req->work.flags |= IO_WQ_WORK_FILES;
 	}
+	if (!(req->work.flags & IO_WQ_WORK_MM) &&
+	    (def->work_flags & IO_WQ_WORK_MM)) {
+		if (id->mm != current->mm)
+			return false;
+		mmgrab(id->mm);
+		req->work.flags |= IO_WQ_WORK_MM;
+	}
 
 	return true;
 }
@@ -1525,13 +1536,6 @@ static void io_prep_async_work(struct io_kiocb *req)
 		req->work.flags |= IO_WQ_WORK_UNBOUND;
 	}
 
-	/* ->mm can never change on us */
-	if (!(req->work.flags & IO_WQ_WORK_MM) &&
-	    (def->work_flags & IO_WQ_WORK_MM)) {
-		mmgrab(id->mm);
-		req->work.flags |= IO_WQ_WORK_MM;
-	}
-
 	/* if we fail grabbing identity, we must COW, regrab, and retry */
 	if (io_grab_identity(req))
 		return;
@@ -7231,14 +7235,28 @@ static void io_file_ref_kill(struct percpu_ref *ref)
 	complete(&data->done);
 }
 
+static void io_sqe_files_set_node(struct fixed_file_data *file_data,
+				  struct fixed_file_ref_node *ref_node)
+{
+	spin_lock_bh(&file_data->lock);
+	file_data->node = ref_node;
+	list_add_tail(&ref_node->node, &file_data->ref_list);
+	spin_unlock_bh(&file_data->lock);
+	percpu_ref_get(&file_data->refs);
+}
+
 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 {
 	struct fixed_file_data *data = ctx->file_data;
-	struct fixed_file_ref_node *ref_node = NULL;
+	struct fixed_file_ref_node *backup_node, *ref_node = NULL;
 	unsigned nr_tables, i;
+	int ret;
 
 	if (!data)
 		return -ENXIO;
+	backup_node = alloc_fixed_file_ref_node(ctx);
+	if (!backup_node)
+		return -ENOMEM;
 
 	spin_lock_bh(&data->lock);
 	ref_node = data->node;
@@ -7250,7 +7268,18 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 
 	/* wait for all refs nodes to complete */
 	flush_delayed_work(&ctx->file_put_work);
-	wait_for_completion(&data->done);
+	do {
+		ret = wait_for_completion_interruptible(&data->done);
+		if (!ret)
+			break;
+		ret = io_run_task_work_sig();
+		if (ret < 0) {
+			percpu_ref_resurrect(&data->refs);
+			reinit_completion(&data->done);
+			io_sqe_files_set_node(data, backup_node);
+			return ret;
+		}
+	} while (1);
 
 	__io_sqe_files_unregister(ctx);
 	nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
@@ -7261,6 +7290,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 	kfree(data);
 	ctx->file_data = NULL;
 	ctx->nr_user_files = 0;
+	destroy_fixed_file_ref_node(backup_node);
 	return 0;
 }
 
@@ -7758,11 +7788,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 		return PTR_ERR(ref_node);
 	}
 
-	file_data->node = ref_node;
-	spin_lock_bh(&file_data->lock);
-	list_add_tail(&ref_node->node, &file_data->ref_list);
-	spin_unlock_bh(&file_data->lock);
-	percpu_ref_get(&file_data->refs);
+	io_sqe_files_set_node(file_data, ref_node);
 	return ret;
 out_fput:
 	for (i = 0; i < ctx->nr_user_files; i++) {
@@ -7918,11 +7944,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 
 	if (needs_switch) {
 		percpu_ref_kill(&data->node->refs);
-		spin_lock_bh(&data->lock);
-		list_add_tail(&ref_node->node, &data->ref_list);
-		data->node = ref_node;
-		spin_unlock_bh(&data->lock);
-		percpu_ref_get(&ctx->file_data->refs);
+		io_sqe_files_set_node(data, ref_node);
 	} else
 		destroy_fixed_file_ref_node(ref_node);
 
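For context on the io_sqe_files_unregister() hunks above: userspace reaches that path through io_uring_register_files()/io_uring_unregister_files() (IORING_REGISTER_FILES / IORING_UNREGISTER_FILES). A minimal liburing sketch of that flow, purely illustrative (arbitrary file path, error handling trimmed):

/*
 * Illustrative sketch, not part of this merge: register one fixed file,
 * read through it with IOSQE_FIXED_FILE, then unregister, which lands in
 * io_sqe_files_unregister() in the kernel.
 */
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	int fd, fds[1];

	fd = open("/etc/hostname", O_RDONLY);	/* any readable file */
	if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	fds[0] = fd;
	io_uring_register_files(&ring, fds, 1);	/* IORING_REGISTER_FILES */

	sqe = io_uring_get_sqe(&ring);
	/* with IOSQE_FIXED_FILE, the fd argument is an index into the table */
	io_uring_prep_read(sqe, 0, buf, sizeof(buf), 0);
	io_uring_sqe_set_flags(sqe, IOSQE_FIXED_FILE);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("read %d bytes\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_unregister_files(&ring);	/* -> io_sqe_files_unregister() */
	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}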
kernel/exit.c

@@ -63,6 +63,7 @@
 #include <linux/random.h>
 #include <linux/rcuwait.h>
 #include <linux/compat.h>
+#include <linux/io_uring.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -776,6 +777,7 @@ void __noreturn do_exit(long code)
 		schedule();
 	}
 
+	io_uring_files_cancel(tsk->files);
 	exit_signals(tsk);		/* sets PF_EXITING */
 
 	/* sync mm's RSS info before statistics gathering */