mm: Use struct slab in kmem_obj_info()
All three implementations of slab support kmem_obj_info() which reports details of an object allocated from the slab allocator. By using the slab type instead of the page type, we make it obvious that this can only be called for slabs.

[ vbabka@suse.cz: also convert the related kmem_valid_obj() to folios ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
This commit is contained in:
parent
0c24811b12
commit
7213230af5
mm/slab.c (12 lines changed)
@ -3646,21 +3646,21 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
|
|||
#endif /* CONFIG_NUMA */
|
||||
|
||||
#ifdef CONFIG_PRINTK
|
||||
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
|
||||
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
|
||||
{
|
||||
struct kmem_cache *cachep;
|
||||
unsigned int objnr;
|
||||
void *objp;
|
||||
|
||||
kpp->kp_ptr = object;
|
||||
kpp->kp_page = page;
|
||||
cachep = page->slab_cache;
|
||||
kpp->kp_slab = slab;
|
||||
cachep = slab->slab_cache;
|
||||
kpp->kp_slab_cache = cachep;
|
||||
objp = object - obj_offset(cachep);
|
||||
kpp->kp_data_offset = obj_offset(cachep);
|
||||
page = virt_to_head_page(objp);
|
||||
objnr = obj_to_index(cachep, page, objp);
|
||||
objp = index_to_obj(cachep, page, objnr);
|
||||
slab = virt_to_slab(objp);
|
||||
objnr = obj_to_index(cachep, slab_page(slab), objp);
|
||||
objp = index_to_obj(cachep, slab_page(slab), objnr);
|
||||
kpp->kp_objp = objp;
|
||||
if (DEBUG && cachep->flags & SLAB_STORE_USER)
|
||||
kpp->kp_ret = *dbg_userword(cachep, objp);
|
||||
|
|
|
@ -801,7 +801,7 @@ static inline void debugfs_slab_release(struct kmem_cache *s) { }
|
|||
#define KS_ADDRS_COUNT 16
|
||||
struct kmem_obj_info {
|
||||
void *kp_ptr;
|
||||
struct page *kp_page;
|
||||
struct slab *kp_slab;
|
||||
void *kp_objp;
|
||||
unsigned long kp_data_offset;
|
||||
struct kmem_cache *kp_slab_cache;
|
||||
|
@ -809,7 +809,7 @@ struct kmem_obj_info {
|
|||
void *kp_stack[KS_ADDRS_COUNT];
|
||||
void *kp_free_stack[KS_ADDRS_COUNT];
|
||||
};
|
||||
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
|
||||
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
|
||||
#endif
|
||||
|
||||
#endif /* MM_SLAB_H */
|
||||
|
|
|
@ -550,13 +550,13 @@ bool slab_is_available(void)
|
|||
*/
|
||||
bool kmem_valid_obj(void *object)
|
||||
{
|
||||
struct page *page;
|
||||
struct folio *folio;
|
||||
|
||||
/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
|
||||
if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
|
||||
return false;
|
||||
page = virt_to_head_page(object);
|
||||
return PageSlab(page);
|
||||
folio = virt_to_folio(object);
|
||||
return folio_test_slab(folio);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kmem_valid_obj);
|
||||
|
||||
|
@ -579,18 +579,18 @@ void kmem_dump_obj(void *object)
|
|||
{
|
||||
char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
|
||||
int i;
|
||||
struct page *page;
|
||||
struct slab *slab;
|
||||
unsigned long ptroffset;
|
||||
struct kmem_obj_info kp = { };
|
||||
|
||||
if (WARN_ON_ONCE(!virt_addr_valid(object)))
|
||||
return;
|
||||
page = virt_to_head_page(object);
|
||||
if (WARN_ON_ONCE(!PageSlab(page))) {
|
||||
slab = virt_to_slab(object);
|
||||
if (WARN_ON_ONCE(!slab)) {
|
||||
pr_cont(" non-slab memory.\n");
|
||||
return;
|
||||
}
|
||||
kmem_obj_info(&kp, object, page);
|
||||
kmem_obj_info(&kp, object, slab);
|
||||
if (kp.kp_slab_cache)
|
||||
pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
|
||||
else
|
||||
|
|
|
@ -462,10 +462,10 @@ out:
|
|||
}
|
||||
|
||||
#ifdef CONFIG_PRINTK
|
||||
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
|
||||
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
|
||||
{
|
||||
kpp->kp_ptr = object;
|
||||
kpp->kp_page = page;
|
||||
kpp->kp_slab = slab;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
mm/slub.c (13 lines changed)
|
@ -4322,31 +4322,32 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_PRINTK
|
||||
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
|
||||
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
|
||||
{
|
||||
void *base;
|
||||
int __maybe_unused i;
|
||||
unsigned int objnr;
|
||||
void *objp;
|
||||
void *objp0;
|
||||
struct kmem_cache *s = page->slab_cache;
|
||||
struct kmem_cache *s = slab->slab_cache;
|
||||
struct track __maybe_unused *trackp;
|
||||
|
||||
kpp->kp_ptr = object;
|
||||
kpp->kp_page = page;
|
||||
kpp->kp_slab = slab;
|
||||
kpp->kp_slab_cache = s;
|
||||
base = page_address(page);
|
||||
base = slab_address(slab);
|
||||
objp0 = kasan_reset_tag(object);
|
||||
#ifdef CONFIG_SLUB_DEBUG
|
||||
objp = restore_red_left(s, objp0);
|
||||
#else
|
||||
objp = objp0;
|
||||
#endif
|
||||
objnr = obj_to_index(s, page, objp);
|
||||
objnr = obj_to_index(s, slab_page(slab), objp);
|
||||
kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
|
||||
objp = base + s->size * objnr;
|
||||
kpp->kp_objp = objp;
|
||||
if (WARN_ON_ONCE(objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size) ||
|
||||
if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
|
||||
|| (objp - base) % s->size) ||
|
||||
!(s->flags & SLAB_STORE_USER))
|
||||
return;
|
||||
#ifdef CONFIG_SLUB_DEBUG
|
||||
|
|
(End of diff view.)