io_uring: better caching for ctx timeout fields
Following the timeout fields' access patterns, move all of them into a separate cache line inside ctx, so they don't interfere with normal completion caching, especially since timeout removals and completions are separated and the latter is done via tw. It also sheds some bytes from io_ring_ctx: 1216B -> 1152B. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/4b163793072840de53b3cb66e0c2995e7226ff78.1655310733.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
b25436038f
commit
aff5b2df9e
|
@ -181,8 +181,6 @@ struct io_ring_ctx {
|
|||
struct xarray io_bl_xa;
|
||||
struct list_head io_buffers_cache;
|
||||
|
||||
struct list_head timeout_list;
|
||||
struct list_head ltimeout_list;
|
||||
struct list_head cq_overflow_list;
|
||||
struct list_head apoll_cache;
|
||||
struct xarray personalities;
|
||||
|
@ -215,15 +213,11 @@ struct io_ring_ctx {
|
|||
struct io_ev_fd __rcu *io_ev_fd;
|
||||
struct wait_queue_head cq_wait;
|
||||
unsigned cq_extra;
|
||||
atomic_t cq_timeouts;
|
||||
unsigned cq_last_tm_flush;
|
||||
} ____cacheline_aligned_in_smp;
|
||||
|
||||
struct {
|
||||
spinlock_t completion_lock;
|
||||
|
||||
spinlock_t timeout_lock;
|
||||
|
||||
/*
|
||||
* ->iopoll_list is protected by the ctx->uring_lock for
|
||||
* io_uring instances that don't use IORING_SETUP_SQPOLL.
|
||||
|
@ -255,6 +249,15 @@ struct io_ring_ctx {
|
|||
struct list_head io_buffers_pages;
|
||||
};
|
||||
|
||||
/* timeouts */
|
||||
struct {
|
||||
spinlock_t timeout_lock;
|
||||
atomic_t cq_timeouts;
|
||||
struct list_head timeout_list;
|
||||
struct list_head ltimeout_list;
|
||||
unsigned cq_last_tm_flush;
|
||||
} ____cacheline_aligned_in_smp;
|
||||
|
||||
/* Keep this last, we don't need it for the fast path */
|
||||
struct {
|
||||
#if defined(CONFIG_UNIX)
|
||||
|
|
Loading…
Reference in New Issue