io_uring: Add KASAN support for alloc_caches

Add support for KASAN in the alloc_caches (apoll and netmsg_cache).
Thus, if something touches the unused caches, it will raise a KASAN
warning/exception.

It poisons the object when the object is put into the cache, and
unpoisons it when the object is retrieved from the cache or freed.

Signed-off-by: Breno Leitao <leitao@debian.org>
Reviewed-by: Gabriel Krisman Bertazi <krisman@suse.de>
Link: https://lore.kernel.org/r/20230223164353.2839177-2-leitao@debian.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Breno Leitao 2023-02-23 08:43:53 -08:00 committed by Jens Axboe
parent efba1a9e65
commit e1fe7ee885
4 changed files with 12 additions and 4 deletions

View File

@ -190,6 +190,7 @@ struct io_ev_fd {
struct io_alloc_cache { struct io_alloc_cache {
struct io_wq_work_node list; struct io_wq_work_node list;
unsigned int nr_cached; unsigned int nr_cached;
size_t elem_size;
}; };
struct io_ring_ctx { struct io_ring_ctx {

View File

@ -16,6 +16,8 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
if (cache->nr_cached < IO_ALLOC_CACHE_MAX) { if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
cache->nr_cached++; cache->nr_cached++;
wq_stack_add_head(&entry->node, &cache->list); wq_stack_add_head(&entry->node, &cache->list);
/* KASAN poisons object */
kasan_slab_free_mempool(entry);
return true; return true;
} }
return false; return false;
@ -27,6 +29,7 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
struct io_cache_entry *entry; struct io_cache_entry *entry;
entry = container_of(cache->list.next, struct io_cache_entry, node); entry = container_of(cache->list.next, struct io_cache_entry, node);
kasan_unpoison_range(entry, cache->elem_size);
cache->list.next = cache->list.next->next; cache->list.next = cache->list.next->next;
cache->nr_cached--; cache->nr_cached--;
return entry; return entry;
@ -35,10 +38,11 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
return NULL; return NULL;
} }
static inline void io_alloc_cache_init(struct io_alloc_cache *cache) static inline void io_alloc_cache_init(struct io_alloc_cache *cache, size_t size)
{ {
cache->list.next = NULL; cache->list.next = NULL;
cache->nr_cached = 0; cache->nr_cached = 0;
cache->elem_size = size;
} }
static inline void io_alloc_cache_free(struct io_alloc_cache *cache, static inline void io_alloc_cache_free(struct io_alloc_cache *cache,

View File

@ -310,8 +310,8 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->sqd_list); INIT_LIST_HEAD(&ctx->sqd_list);
INIT_LIST_HEAD(&ctx->cq_overflow_list); INIT_LIST_HEAD(&ctx->cq_overflow_list);
INIT_LIST_HEAD(&ctx->io_buffers_cache); INIT_LIST_HEAD(&ctx->io_buffers_cache);
io_alloc_cache_init(&ctx->apoll_cache); io_alloc_cache_init(&ctx->apoll_cache, sizeof(struct async_poll));
io_alloc_cache_init(&ctx->netmsg_cache); io_alloc_cache_init(&ctx->netmsg_cache, sizeof(struct io_async_msghdr));
init_completion(&ctx->ref_comp); init_completion(&ctx->ref_comp);
xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1); xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
mutex_init(&ctx->uring_lock); mutex_init(&ctx->uring_lock);

View File

@ -5,8 +5,8 @@
#include "alloc_cache.h" #include "alloc_cache.h"
#if defined(CONFIG_NET)
struct io_async_msghdr { struct io_async_msghdr {
#if defined(CONFIG_NET)
union { union {
struct iovec fast_iov[UIO_FASTIOV]; struct iovec fast_iov[UIO_FASTIOV];
struct { struct {
@ -22,8 +22,11 @@ struct io_async_msghdr {
struct sockaddr __user *uaddr; struct sockaddr __user *uaddr;
struct msghdr msg; struct msghdr msg;
struct sockaddr_storage addr; struct sockaddr_storage addr;
#endif
}; };
#if defined(CONFIG_NET)
struct io_async_connect { struct io_async_connect {
struct sockaddr_storage address; struct sockaddr_storage address;
}; };