mm/kasan: Convert to struct folio and struct slab
KASAN accesses some slab related struct page fields so we need to convert it
to struct slab. Some places are a bit simplified thanks to kasan_addr_to_slab()
encapsulating the PageSlab flag check through virt_to_slab(). When resolving an
object address to either a real slab or a large kmalloc, use struct folio as
the intermediate type for testing the slab flag, to avoid an unnecessary
implicit compound_head().

[ vbabka@suse.cz: use struct folio, adjust to differences in previous patches ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: <kasan-dev@googlegroups.com>
parent 50757018b4
commit 6e48a966df
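Before the per-file hunks, a minimal sketch of the folio-as-intermediate pattern the message describes (illustrative only: object_cache() is a hypothetical helper; virt_to_folio(), folio_test_slab(), folio_slab() and ->slab_cache are the real APIs this series builds on):

/*
 * Resolve an object pointer to its kmem_cache, or NULL for a large
 * kmalloc that fell back to page_alloc. virt_to_folio() does the one
 * compound_head() lookup; folio_test_slab() then reads the flag
 * directly, and folio_slab() is a type-safe cast, not another lookup.
 */
static struct kmem_cache *object_cache(const void *ptr)
{
	struct folio *folio = virt_to_folio(ptr);

	if (!folio_test_slab(folio))
		return NULL;	/* large kmalloc, no slab metadata */

	return folio_slab(folio)->slab_cache;
}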
include/linux/kasan.h
@@ -9,6 +9,7 @@
 
 struct kmem_cache;
 struct page;
+struct slab;
 struct vm_struct;
 struct task_struct;
 
@@ -193,11 +194,11 @@ static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
 	return 0;
 }
 
-void __kasan_poison_slab(struct page *page);
-static __always_inline void kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct slab *slab);
+static __always_inline void kasan_poison_slab(struct slab *slab)
 {
 	if (kasan_enabled())
-		__kasan_poison_slab(page);
+		__kasan_poison_slab(slab);
 }
 
 void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
@@ -322,7 +323,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
 					slab_flags_t *flags) {}
 static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
 static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
-static inline void kasan_poison_slab(struct page *page) {}
+static inline void kasan_poison_slab(struct slab *slab) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 					      void *object) {}
 static inline void kasan_poison_object_data(struct kmem_cache *cache,
mm/kasan/common.c
@@ -247,8 +247,9 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 }
 #endif
 
-void __kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct slab *slab)
 {
+	struct page *page = slab_page(slab);
 	unsigned long i;
 
 	for (i = 0; i < compound_nr(page); i++)
@@ -401,9 +402,9 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
 
 void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 {
-	struct page *page;
+	struct folio *folio;
 
-	page = virt_to_head_page(ptr);
+	folio = virt_to_folio(ptr);
 
 	/*
 	 * Even though this function is only called for kmem_cache_alloc and
@@ -411,12 +412,14 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 	 * !PageSlab() when the size provided to kmalloc is larger than
 	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
 	 */
-	if (unlikely(!PageSlab(page))) {
+	if (unlikely(!folio_test_slab(folio))) {
 		if (____kasan_kfree_large(ptr, ip))
 			return;
-		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
+		kasan_poison(ptr, folio_size(folio), KASAN_FREE_PAGE, false);
 	} else {
-		____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
+		struct slab *slab = folio_slab(folio);
+
+		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
 	}
 }
 
@@ -560,7 +563,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 
 void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
-	struct page *page;
+	struct slab *slab;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
@@ -572,13 +575,13 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 	 */
 	kasan_unpoison(object, size, false);
 
-	page = virt_to_head_page(object);
+	slab = virt_to_slab(object);
 
 	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
-	if (unlikely(!PageSlab(page)))
+	if (unlikely(!slab))
 		return __kasan_kmalloc_large(object, size, flags);
 	else
-		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
+		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
 }
 
 bool __kasan_check_byte(const void *address, unsigned long ip)
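The NULL test above works because virt_to_slab() folds the PageSlab check into the address resolution itself. A sketch of that helper, assuming the mm/slab.h definition added earlier in this series:

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	/* Non-slab memory (e.g. a large kmalloc page) yields NULL. */
	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}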
mm/kasan/generic.c
@@ -330,16 +330,16 @@ DEFINE_ASAN_SET_SHADOW(f8);
 
 static void __kasan_record_aux_stack(void *addr, bool can_alloc)
 {
-	struct page *page = kasan_addr_to_page(addr);
+	struct slab *slab = kasan_addr_to_slab(addr);
 	struct kmem_cache *cache;
 	struct kasan_alloc_meta *alloc_meta;
 	void *object;
 
-	if (is_kfence_address(addr) || !(page && PageSlab(page)))
+	if (is_kfence_address(addr) || !slab)
 		return;
 
-	cache = page->slab_cache;
-	object = nearest_obj(cache, page_slab(page), addr);
+	cache = slab->slab_cache;
+	object = nearest_obj(cache, slab, addr);
 	alloc_meta = kasan_get_alloc_meta(cache, object);
 	if (!alloc_meta)
 		return;
mm/kasan/kasan.h
@@ -265,6 +265,7 @@ bool kasan_report(unsigned long addr, size_t size,
 void kasan_report_invalid_free(void *object, unsigned long ip);
 
 struct page *kasan_addr_to_page(const void *addr);
+struct slab *kasan_addr_to_slab(const void *addr);
 
 depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
 void kasan_set_track(struct kasan_track *track, gfp_t flags);
mm/kasan/quarantine.c
@@ -117,7 +117,7 @@ static unsigned long quarantine_batch_size;
 
 static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
 {
-	return virt_to_head_page(qlink)->slab_cache;
+	return virt_to_slab(qlink)->slab_cache;
 }
 
 static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
mm/kasan/report.c
@@ -150,6 +150,14 @@ struct page *kasan_addr_to_page(const void *addr)
 	return NULL;
 }
 
+struct slab *kasan_addr_to_slab(const void *addr)
+{
+	if ((addr >= (void *)PAGE_OFFSET) &&
+			(addr < high_memory))
+		return virt_to_slab(addr);
+	return NULL;
+}
+
 static void describe_object_addr(struct kmem_cache *cache, void *object,
 				 const void *addr)
 {
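kasan_addr_to_slab() bounds-checks the address against the linear map (PAGE_OFFSET up to high_memory) before calling virt_to_slab(), so vmalloc and other non-linear-map pointers resolve to NULL. Callers can then collapse the old "page && PageSlab(page)" test into a single NULL check, roughly as follows (illustrative fragment; describe() is a hypothetical consumer standing in for whatever the caller does with the metadata):

static void inspect(const void *addr)
{
	struct slab *slab = kasan_addr_to_slab(addr);

	if (slab) {
		struct kmem_cache *cache = slab->slab_cache;
		void *object = nearest_obj(cache, slab, (void *)addr);

		describe(cache, object);	/* hypothetical consumer */
	}
}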
@@ -248,8 +256,9 @@ static void print_address_description(void *addr, u8 tag)
 	pr_err("\n");
 
 	if (page && PageSlab(page)) {
-		struct kmem_cache *cache = page->slab_cache;
-		void *object = nearest_obj(cache, page_slab(page), addr);
+		struct slab *slab = page_slab(page);
+		struct kmem_cache *cache = slab->slab_cache;
+		void *object = nearest_obj(cache, slab, addr);
 
 		describe_object(cache, object, addr, tag);
 	}
mm/kasan/report_tags.c
@@ -12,7 +12,7 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
 #ifdef CONFIG_KASAN_TAGS_IDENTIFY
 	struct kasan_alloc_meta *alloc_meta;
 	struct kmem_cache *cache;
-	struct page *page;
+	struct slab *slab;
 	const void *addr;
 	void *object;
 	u8 tag;
@@ -20,10 +20,10 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
 
 	tag = get_tag(info->access_addr);
 	addr = kasan_reset_tag(info->access_addr);
-	page = kasan_addr_to_page(addr);
-	if (page && PageSlab(page)) {
-		cache = page->slab_cache;
-		object = nearest_obj(cache, page_slab(page), (void *)addr);
+	slab = kasan_addr_to_slab(addr);
+	if (slab) {
+		cache = slab->slab_cache;
+		object = nearest_obj(cache, slab, (void *)addr);
 		alloc_meta = kasan_get_alloc_meta(cache, object);
 
 		if (alloc_meta) {
mm/slab.c
@@ -2604,7 +2604,7 @@ static struct slab *cache_grow_begin(struct kmem_cache *cachep,
 	 * page_address() in the latter returns a non-tagged pointer,
 	 * as it should be for slab pages.
 	 */
-	kasan_poison_slab(slab_page(slab));
+	kasan_poison_slab(slab);
 
 	/* Get slab management. */
 	freelist = alloc_slabmgmt(cachep, slab, offset,