mm/slab_common: remove CONFIG_NUMA ifdefs for common kmalloc functions

Now that slab_alloc_node() is available for SLAB when CONFIG_NUMA=n,
remove CONFIG_NUMA ifdefs for common kmalloc functions.
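
For background, with CONFIG_NUMA=n the header used to redirect each
node-aware entry point to its non-node counterpart through a static inline
wrapper, along these lines (a simplified sketch of the pattern being
deleted, not the exact header text):

	/* !CONFIG_NUMA fallback: the node argument was silently dropped */
	static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
	{
		return __kmalloc(size, flags);
	}

With slab_alloc_node() now available on !NUMA SLAB, the out-of-line node
variants are always built, and a caller's node argument (normally
NUMA_NO_NODE on such configs) is handled by the common allocation path.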

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Author:    Hyeonggon Yoo <42.hyeyoo@gmail.com>
Date:      2022-08-17 19:18:12 +09:00
Committer: Vlastimil Babka <vbabka@suse.cz>
Commit:    f78a03f6e2 (parent 07588d726f)

4 files changed, 1 insertion(+), 40 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h

@@ -456,38 +456,18 @@ static __always_inline void kfree_bulk(size_t size, void **p)
 	kmem_cache_free_bulk(NULL, size, p);
 }
 
-#ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
 							 __alloc_size(1);
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
 									  __malloc;
-#else
-static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __kmalloc(size, flags);
-}
-
-static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
-{
-	return kmem_cache_alloc(s, flags);
-}
-#endif
 
 #ifdef CONFIG_TRACING
 extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
 				    __assume_slab_alignment __alloc_size(3);
 
-#ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 					 int node, size_t size) __assume_slab_alignment
 								__alloc_size(4);
-#else
-static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-						 gfp_t gfpflags, int node, size_t size)
-{
-	return kmem_cache_alloc_trace(s, gfpflags, size);
-}
-#endif /* CONFIG_NUMA */
 
 #else /* CONFIG_TRACING */
 static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
@@ -701,20 +681,12 @@ static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t
 	return kmalloc_node(bytes, flags, node);
 }
 
-#ifdef CONFIG_NUMA
 extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
 					 unsigned long caller) __alloc_size(1);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
 				    _RET_IP_)
-
-#else /* CONFIG_NUMA */
-
-#define kmalloc_node_track_caller(size, flags, node) \
-	kmalloc_track_caller(size, flags)
-
-#endif /* CONFIG_NUMA */
 
 /*
  * Shortcuts
  */

diff --git a/mm/slab.c b/mm/slab.c

@@ -3535,7 +3535,6 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
-#ifdef CONFIG_NUMA
 /**
  * kmem_cache_alloc_node - Allocate an object on the specified node
  * @cachep: The cache to allocate from.
@@ -3609,7 +3608,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
 	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
-#endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_PRINTK
 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)

diff --git a/mm/slob.c b/mm/slob.c

@@ -536,14 +536,12 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
-#ifdef CONFIG_NUMA
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
 					int node, unsigned long caller)
 {
 	return __do_kmalloc_node(size, gfp, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
-#endif
 
 void kfree(const void *block)
 {
@@ -647,7 +645,7 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, gfp_
 	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_lru);
-#ifdef CONFIG_NUMA
+
 void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
@@ -659,7 +657,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
 	return slob_alloc_node(cachep, gfp, node);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
-#endif
 
 static void __kmem_cache_free(void *b, int size)
 {

diff --git a/mm/slub.c b/mm/slub.c

@@ -3287,7 +3287,6 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
-#ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
@@ -3314,7 +3313,6 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
-#endif /* CONFIG_NUMA */
 
 /*
  * Slow path handling. This may still be called frequently since objects
@@ -4427,7 +4425,6 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
-#ifdef CONFIG_NUMA
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct page *page;
@@ -4474,7 +4471,6 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
-#endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
@@ -4930,7 +4926,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
-#ifdef CONFIG_NUMA
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 					int node, unsigned long caller)
 {
@@ -4960,7 +4955,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
-#endif
 
 #ifdef CONFIG_SYSFS
 static int count_inuse(struct slab *slab)