io_uring: introduce a struct for hash table
Instead of passing around a pointer to hash buckets, add a bit of type
safety and wrap it into a structure.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d65bc3faba537ec2aca9eabf334394936d44bd28.1655371007.git.asml.silence@gmail.com
Reviewed-by: Hao Xu <howeyxu@tencent.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit e6f89be614
parent a2cdd51932
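To make the type-safety point concrete, here is a minimal user-space sketch of the before/after shape of this change. It is illustrative only: the names mirror the kernel's, a pthread mutex stands in for spinlock_t, and a bare pointer stands in for the hlist head.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Mirrors the kernel's io_hash_bucket/io_hash_table; illustrative only. */
struct io_hash_bucket {
	pthread_mutex_t lock;   /* stand-in for spinlock_t */
	void *list;             /* stand-in for struct hlist_head */
};

struct io_hash_table {
	struct io_hash_bucket *hbs;
	unsigned hash_bits;
};

/* Old style: buckets and their count travel as two loose arguments. */
void init_hash_table_old(struct io_hash_bucket *hash_table, unsigned size)
{
	for (unsigned i = 0; i < size; i++) {
		pthread_mutex_init(&hash_table[i].lock, NULL);
		hash_table[i].list = NULL;
	}
}

/* New style: the struct carries both, so they cannot go out of sync. */
void init_hash_table(struct io_hash_table *table, unsigned size)
{
	for (unsigned i = 0; i < size; i++) {
		pthread_mutex_init(&table->hbs[i].lock, NULL);
		table->hbs[i].list = NULL;
	}
}

int main(void)
{
	struct io_hash_table table = { .hash_bits = 3 };

	table.hbs = calloc(1u << table.hash_bits, sizeof(*table.hbs));
	if (!table.hbs)
		return 1;
	init_hash_table(&table, 1u << table.hash_bits);
	/* init_hash_table(table.hbs, 8) is now rejected by the compiler
	 * (incompatible pointer type), which is the point of the wrap. */
	printf("initialized %u buckets\n", 1u << table.hash_bits);
	free(table.hbs);
	return 0;
}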
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -193,12 +193,12 @@ done:
 	return IOU_OK;
 }
 
-void init_hash_table(struct io_hash_bucket *hash_table, unsigned size)
+void init_hash_table(struct io_hash_table *table, unsigned size)
 {
 	unsigned int i;
 
 	for (i = 0; i < size; i++) {
-		spin_lock_init(&hash_table[i].lock);
-		INIT_HLIST_HEAD(&hash_table[i].list);
+		spin_lock_init(&table->hbs[i].lock);
+		INIT_HLIST_HEAD(&table->hbs[i].list);
 	}
 }
--- a/io_uring/cancel.h
+++ b/io_uring/cancel.h
@@ -4,9 +4,4 @@ int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);
 
 int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd);
-void init_hash_table(struct io_hash_bucket *hash_table, unsigned size);
-
-struct io_hash_bucket {
-	spinlock_t lock;
-	struct hlist_head list;
-} ____cacheline_aligned_in_smp;
+void init_hash_table(struct io_hash_table *table, unsigned size);
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -158,8 +158,8 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
 	mutex_unlock(&ctx->uring_lock);
 
 	seq_puts(m, "PollList:\n");
-	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
-		struct io_hash_bucket *hb = &ctx->cancel_hash[i];
+	for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
+		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
 		struct io_kiocb *req;
 
 		spin_lock(&hb->lock);
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -241,11 +241,23 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 	percpu_ref_put(&ctx->refs);
 }
 
+static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
+{
+	unsigned hash_buckets = 1U << bits;
+	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);
+
+	table->hbs = kmalloc(hash_size, GFP_KERNEL);
+	if (!table->hbs)
+		return -ENOMEM;
+
+	table->hash_bits = bits;
+	init_hash_table(table, hash_buckets);
+	return 0;
+}
+
 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 {
 	struct io_ring_ctx *ctx;
-	unsigned hash_buckets;
-	size_t hash_size;
 	int hash_bits;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -261,16 +273,9 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	 */
 	hash_bits = ilog2(p->cq_entries) - 5;
 	hash_bits = clamp(hash_bits, 1, 8);
-	hash_buckets = 1U << hash_bits;
-	hash_size = hash_buckets * sizeof(struct io_hash_bucket);
-
-	ctx->cancel_hash_bits = hash_bits;
-	ctx->cancel_hash = kmalloc(hash_size, GFP_KERNEL);
-	if (!ctx->cancel_hash)
+	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
 		goto err;
 
-	init_hash_table(ctx->cancel_hash, hash_buckets);
-
 	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
 	if (!ctx->dummy_ubuf)
 		goto err;
@@ -311,7 +316,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	return ctx;
 err:
 	kfree(ctx->dummy_ubuf);
-	kfree(ctx->cancel_hash);
+	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
 	kfree(ctx);
@@ -2487,7 +2492,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_req_caches_free(ctx);
 	if (ctx->hash_map)
 		io_wq_put_hash(ctx->hash_map);
-	kfree(ctx->cancel_hash);
+	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->dummy_ubuf);
 	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
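The new io_alloc_hash_table() helper owns the sizing arithmetic that io_ring_ctx_alloc() used to do inline. A hedged user-space analogue (libc malloc/free in place of kmalloc/kfree, errno's ENOMEM in place of the kernel's): note that teardown frees only table->hbs, since the io_hash_table itself is embedded in the context rather than heap-allocated.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct io_hash_bucket {
	/* the real bucket holds a spinlock and an hlist head */
	long lock_placeholder;
	void *list_placeholder;
};

struct io_hash_table {
	struct io_hash_bucket *hbs;
	unsigned hash_bits;
};

/* Analogue of io_alloc_hash_table(): derive the byte size from the
 * element type, allocate, then record the exponent in the same struct. */
static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
{
	unsigned hash_buckets = 1u << bits;
	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);

	table->hbs = malloc(hash_size);
	if (!table->hbs)
		return -ENOMEM;
	table->hash_bits = bits;
	return 0;
}

int main(void)
{
	struct io_hash_table cancel_table; /* embedded in io_ring_ctx in the kernel */

	if (io_alloc_hash_table(&cancel_table, 4))
		return 1;
	printf("allocated %u buckets\n", 1u << cancel_table.hash_bits);
	/* Teardown frees only the bucket array, as in io_ring_ctx_free(). */
	free(cancel_table.hbs);
	return 0;
}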
--- a/io_uring/io_uring_types.h
+++ b/io_uring/io_uring_types.h
@@ -9,6 +9,16 @@
 #include "io-wq.h"
 #include "filetable.h"
 
+struct io_hash_bucket {
+	spinlock_t		lock;
+	struct hlist_head	list;
+} ____cacheline_aligned_in_smp;
+
+struct io_hash_table {
+	struct io_hash_bucket	*hbs;
+	unsigned		hash_bits;
+};
+
 struct io_uring {
 	u32 head ____cacheline_aligned_in_smp;
 	u32 tail ____cacheline_aligned_in_smp;
@@ -224,8 +234,7 @@ struct io_ring_ctx {
 	 * manipulate the list, hence no extra locking is needed there.
 	 */
 	struct io_wq_work_list	iopoll_list;
-	struct io_hash_bucket	*cancel_hash;
-	unsigned		cancel_hash_bits;
+	struct io_hash_table	cancel_table;
 	bool			poll_multi_queue;
 
 	struct list_head	io_buffers_comp;
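With the struct above, a bucket lookup needs nothing beyond hash_bits. The sketch below shows the indexing arithmetic; hash_long_sketch() is a stand-in written for this example, using the same 64-bit golden-ratio multiplier as the kernel's hash_long(), and the only property that matters here is that the result always fits in 'bits' bits, so it is a valid hbs[] index.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for hash_long(val, bits): multiplicative hash folded down
 * to 'bits' bits, so the return value is always < (1u << bits). */
static uint32_t hash_long_sketch(uint64_t val, unsigned bits)
{
	return (uint32_t)((val * 0x61c8864680b583ebULL) >> (64 - bits));
}

int main(void)
{
	unsigned hash_bits = 4;             /* table->hash_bits */
	uint64_t user_data = 0xdeadbeefULL; /* req->cqe.user_data */
	uint32_t index = hash_long_sketch(user_data, hash_bits);

	/* index is guaranteed < (1u << hash_bits), i.e. a valid hbs[] slot */
	printf("user_data %#llx -> bucket %u of %u\n",
	       (unsigned long long)user_data, index, 1u << hash_bits);
	return 0;
}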
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -73,9 +73,9 @@ static struct io_poll *io_poll_get_single(struct io_kiocb *req)
 
 static void io_poll_req_insert(struct io_kiocb *req)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-	u32 index = hash_long(req->cqe.user_data, ctx->cancel_hash_bits);
-	struct io_hash_bucket *hb = &ctx->cancel_hash[index];
+	struct io_hash_table *table = &req->ctx->cancel_table;
+	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
+	struct io_hash_bucket *hb = &table->hbs[index];
 
 	spin_lock(&hb->lock);
 	hlist_add_head(&req->hash_node, &hb->list);
@@ -84,8 +84,9 @@ static void io_poll_req_insert(struct io_kiocb *req)
 
 static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
 {
-	u32 index = hash_long(req->cqe.user_data, ctx->cancel_hash_bits);
-	spinlock_t *lock = &ctx->cancel_hash[index].lock;
+	struct io_hash_table *table = &req->ctx->cancel_table;
+	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
+	spinlock_t *lock = &table->hbs[index].lock;
 
 	spin_lock(lock);
 	hash_del(&req->hash_node);
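io_poll_req_insert() and io_poll_req_delete() above lock only the single bucket the key hashes to, so requests with unrelated user_data values never contend. A self-contained user-space sketch of that per-bucket locking scheme (pthread mutexes replace spinlocks, a hand-rolled singly linked list replaces hlist):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	uint64_t key;
	struct node *next;
};

struct bucket {
	pthread_mutex_t lock;
	struct node *head;
};

struct table {
	struct bucket *hbs;
	unsigned hash_bits;
};

static unsigned bucket_index(struct table *t, uint64_t key)
{
	/* cheap fold; the kernel uses hash_long(key, t->hash_bits) */
	return (unsigned)((key * 0x61c8864680b583ebULL) >> (64 - t->hash_bits));
}

/* Insert locks exactly one bucket, so unrelated keys don't contend. */
static void table_insert(struct table *t, struct node *n)
{
	struct bucket *hb = &t->hbs[bucket_index(t, n->key)];

	pthread_mutex_lock(&hb->lock);
	n->next = hb->head;
	hb->head = n;
	pthread_mutex_unlock(&hb->lock);
}

/* Delete likewise touches only the bucket the key hashes to. */
static void table_delete(struct table *t, struct node *n)
{
	struct bucket *hb = &t->hbs[bucket_index(t, n->key)];
	struct node **pp;

	pthread_mutex_lock(&hb->lock);
	for (pp = &hb->head; *pp; pp = &(*pp)->next) {
		if (*pp == n) {
			*pp = n->next;
			break;
		}
	}
	pthread_mutex_unlock(&hb->lock);
}

int main(void)
{
	struct table t = { .hash_bits = 3 };
	struct node n = { .key = 42 };
	unsigned i, nr = 1u << t.hash_bits;

	t.hbs = calloc(nr, sizeof(*t.hbs));
	if (!t.hbs)
		return 1;
	for (i = 0; i < nr; i++)
		pthread_mutex_init(&t.hbs[i].lock, NULL);

	table_insert(&t, &n);
	table_delete(&t, &n);
	printf("key %llu used bucket %u\n",
	       (unsigned long long)n.key, bucket_index(&t, n.key));
	free(t.hbs);
	return 0;
}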
@@ -539,13 +540,15 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 			       bool cancel_all)
 {
+	struct io_hash_table *table = &ctx->cancel_table;
+	unsigned nr_buckets = 1U << table->hash_bits;
 	struct hlist_node *tmp;
 	struct io_kiocb *req;
 	bool found = false;
 	int i;
 
-	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
-		struct io_hash_bucket *hb = &ctx->cancel_hash[i];
+	for (i = 0; i < nr_buckets; i++) {
+		struct io_hash_bucket *hb = &table->hbs[i];
 
 		spin_lock(&hb->lock);
 		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
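By contrast, io_poll_remove_all() above (and io_poll_file_find() below) cannot hash to one bucket, so they scan all 1U << hash_bits buckets, taking each lock in turn. A sketch of that scan, under the same stand-in assumptions as the previous example:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct node { uint64_t key; struct node *next; };
struct bucket { pthread_mutex_t lock; struct node *head; };
struct table { struct bucket *hbs; unsigned hash_bits; };

/* Analogue of io_poll_remove_all(): visit every bucket, holding only
 * that bucket's lock while its list is edited. 'match' decides removal,
 * like the task/cancel_all checks in the kernel. */
static int table_remove_all(struct table *t, int (*match)(struct node *))
{
	unsigned nr_buckets = 1u << t->hash_bits;
	int found = 0;

	for (unsigned i = 0; i < nr_buckets; i++) {
		struct bucket *hb = &t->hbs[i];
		struct node **pp;

		pthread_mutex_lock(&hb->lock);
		pp = &hb->head;
		while (*pp) {
			if (match(*pp)) {
				*pp = (*pp)->next; /* unlink, keep scanning */
				found = 1;
			} else {
				pp = &(*pp)->next;
			}
		}
		pthread_mutex_unlock(&hb->lock);
	}
	return found;
}

static int match_all(struct node *n) { (void)n; return 1; }

int main(void)
{
	struct table t = { .hash_bits = 2 };
	struct node a = { .key = 1 }, b = { .key = 2 };
	unsigned i, nr = 1u << t.hash_bits;

	t.hbs = calloc(nr, sizeof(*t.hbs));
	if (!t.hbs)
		return 1;
	for (i = 0; i < nr; i++)
		pthread_mutex_init(&t.hbs[i].lock, NULL);
	/* put both nodes in bucket 0 just to exercise the unlink loop */
	a.next = &b;
	t.hbs[0].head = &a;

	printf("removed anything: %d\n", table_remove_all(&t, match_all));
	free(t.hbs);
	return 0;
}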
@@ -562,12 +565,12 @@ __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 
 static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 				     struct io_cancel_data *cd,
-				     struct io_hash_bucket hash_table[],
+				     struct io_hash_table *table,
 				     struct io_hash_bucket **out_bucket)
 {
 	struct io_kiocb *req;
-	u32 index = hash_long(cd->data, ctx->cancel_hash_bits);
-	struct io_hash_bucket *hb = &hash_table[index];
+	u32 index = hash_long(cd->data, table->hash_bits);
+	struct io_hash_bucket *hb = &table->hbs[index];
 
 	*out_bucket = NULL;
 
@@ -591,16 +594,17 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 
 static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
 					  struct io_cancel_data *cd,
-					  struct io_hash_bucket hash_table[],
+					  struct io_hash_table *table,
 					  struct io_hash_bucket **out_bucket)
 {
+	unsigned nr_buckets = 1U << table->hash_bits;
 	struct io_kiocb *req;
 	int i;
 
 	*out_bucket = NULL;
 
-	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
-		struct io_hash_bucket *hb = &hash_table[i];
+	for (i = 0; i < nr_buckets; i++) {
+		struct io_hash_bucket *hb = &table->hbs[i];
 
 		spin_lock(&hb->lock);
 		hlist_for_each_entry(req, &hb->list, hash_node) {
@@ -628,15 +632,15 @@ static bool io_poll_disarm(struct io_kiocb *req)
 }
 
 static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
-			    struct io_hash_bucket hash_table[])
+			    struct io_hash_table *table)
 {
 	struct io_hash_bucket *bucket;
 	struct io_kiocb *req;
 
 	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
-		req = io_poll_file_find(ctx, cd, ctx->cancel_hash, &bucket);
+		req = io_poll_file_find(ctx, cd, table, &bucket);
 	else
-		req = io_poll_find(ctx, false, cd, ctx->cancel_hash, &bucket);
+		req = io_poll_find(ctx, false, cd, table, &bucket);
 
 	if (req)
 		io_poll_cancel_req(req);
@@ -647,7 +651,7 @@ static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
 
 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 {
-	return __io_poll_cancel(ctx, cd, ctx->cancel_hash);
+	return __io_poll_cancel(ctx, cd, &ctx->cancel_table);
 }
 
 static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
@@ -745,7 +749,7 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 	int ret2, ret = 0;
 	bool locked;
 
-	preq = io_poll_find(ctx, true, &cd, ctx->cancel_hash, &bucket);
+	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
 	if (preq)
 		ret2 = io_poll_disarm(preq);
 	if (bucket)