slab: move struct kmem_cache to headers

Move the SLAB struct kmem_cache definition to <linux/slab_def.h> like
with SLUB so kmemcheck can access ->ctor and ->flags.

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
commit 8eae985f08
parent b618ad31bb
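Note: the point of the move is visibility. Once struct kmem_cache is
defined in <linux/slab_def.h> (pulled in via <linux/slab.h> when SLAB is
the configured allocator), code outside mm/slab.c can dereference a cache
pointer directly, which is what kmemcheck needs for ->ctor and ->flags.
A minimal sketch of such an access, with hypothetical helper names that
are not part of this commit:

	#include <linux/slab.h>

	/* Hypothetical helpers; only possible outside mm/slab.c now that
	 * struct kmem_cache is a complete type in <linux/slab_def.h>. */
	static inline bool cache_has_ctor(struct kmem_cache *s)
	{
		return s->ctor != NULL;
	}

	static inline bool cache_destroyed_by_rcu(struct kmem_cache *s)
	{
		return s->flags & SLAB_DESTROY_BY_RCU;	/* SLAB_* flag from <linux/slab.h> */
	}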
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -16,6 +16,87 @@
 #include <linux/compiler.h>
 #include <linux/kmemtrace.h>
 
+/*
+ * struct kmem_cache
+ *
+ * manages a cache.
+ */
+
+struct kmem_cache {
+/* 1) per-cpu data, touched during every alloc/free */
+	struct array_cache *array[NR_CPUS];
+/* 2) Cache tunables. Protected by cache_chain_mutex */
+	unsigned int batchcount;
+	unsigned int limit;
+	unsigned int shared;
+
+	unsigned int buffer_size;
+	u32 reciprocal_buffer_size;
+/* 3) touched by every alloc & free from the backend */
+
+	unsigned int flags;		/* constant flags */
+	unsigned int num;		/* # of objs per slab */
+
+/* 4) cache_grow/shrink */
+	/* order of pgs per slab (2^n) */
+	unsigned int gfporder;
+
+	/* force GFP flags, e.g. GFP_DMA */
+	gfp_t gfpflags;
+
+	size_t colour;			/* cache colouring range */
+	unsigned int colour_off;	/* colour offset */
+	struct kmem_cache *slabp_cache;
+	unsigned int slab_size;
+	unsigned int dflags;		/* dynamic flags */
+
+	/* constructor func */
+	void (*ctor)(void *obj);
+
+/* 5) cache creation/removal */
+	const char *name;
+	struct list_head next;
+
+/* 6) statistics */
+#ifdef CONFIG_DEBUG_SLAB
+	unsigned long num_active;
+	unsigned long num_allocations;
+	unsigned long high_mark;
+	unsigned long grown;
+	unsigned long reaped;
+	unsigned long errors;
+	unsigned long max_freeable;
+	unsigned long node_allocs;
+	unsigned long node_frees;
+	unsigned long node_overflow;
+	atomic_t allochit;
+	atomic_t allocmiss;
+	atomic_t freehit;
+	atomic_t freemiss;
+
+	/*
+	 * If debugging is enabled, then the allocator can add additional
+	 * fields and/or padding to every object. buffer_size contains the total
+	 * object size including these internal fields, the following two
+	 * variables contain the offset to the user object and its size.
+	 */
+	int obj_offset;
+	int obj_size;
+#endif /* CONFIG_DEBUG_SLAB */
+
+	/*
+	 * We put nodelists[] at the end of kmem_cache, because we want to size
+	 * this array to nr_node_ids slots instead of MAX_NUMNODES
+	 * (see kmem_cache_init())
+	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
+	 * is statically defined, so we reserve the max number of nodes.
+	 */
+	struct kmem_list3 *nodelists[MAX_NUMNODES];
+	/*
+	 * Do not add fields after nodelists[]
+	 */
+};
+
 /* Size description struct for general caches. */
 struct cache_sizes {
 	size_t cs_size;
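A note on one of the fields above: reciprocal_buffer_size caches
reciprocal_value(buffer_size) so that hot paths can map an object's byte
offset to its index with a multiplication instead of a division. A sketch
of the pattern, modelled on slab's obj_to_index() and assumed here for
illustration only (slab_base stands in for the slab's s_mem):

	#include <linux/reciprocal_div.h>

	/* offset / buffer_size, computed via reciprocal_divide() */
	static inline unsigned int sketch_obj_to_index(const struct kmem_cache *cache,
						       void *slab_base, void *obj)
	{
		u32 offset = obj - slab_base;

		return reciprocal_divide(offset, cache->reciprocal_buffer_size);
	}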
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -374,87 +374,6 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 	} while (0)
 
-/*
- * struct kmem_cache
- *
- * manages a cache.
- */
-
-struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
-	struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
-	unsigned int batchcount;
-	unsigned int limit;
-	unsigned int shared;
-
-	unsigned int buffer_size;
-	u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
-
-	unsigned int flags;		/* constant flags */
-	unsigned int num;		/* # of objs per slab */
-
-/* 4) cache_grow/shrink */
-	/* order of pgs per slab (2^n) */
-	unsigned int gfporder;
-
-	/* force GFP flags, e.g. GFP_DMA */
-	gfp_t gfpflags;
-
-	size_t colour;			/* cache colouring range */
-	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *slabp_cache;
-	unsigned int slab_size;
-	unsigned int dflags;		/* dynamic flags */
-
-	/* constructor func */
-	void (*ctor)(void *obj);
-
-/* 5) cache creation/removal */
-	const char *name;
-	struct list_head next;
-
-/* 6) statistics */
-#if STATS
-	unsigned long num_active;
-	unsigned long num_allocations;
-	unsigned long high_mark;
-	unsigned long grown;
-	unsigned long reaped;
-	unsigned long errors;
-	unsigned long max_freeable;
-	unsigned long node_allocs;
-	unsigned long node_frees;
-	unsigned long node_overflow;
-	atomic_t allochit;
-	atomic_t allocmiss;
-	atomic_t freehit;
-	atomic_t freemiss;
-#endif
-#if DEBUG
-	/*
-	 * If debugging is enabled, then the allocator can add additional
-	 * fields and/or padding to every object. buffer_size contains the total
-	 * object size including these internal fields, the following two
-	 * variables contain the offset to the user object and its size.
-	 */
-	int obj_offset;
-	int obj_size;
-#endif
-	/*
-	 * We put nodelists[] at the end of kmem_cache, because we want to size
-	 * this array to nr_node_ids slots instead of MAX_NUMNODES
-	 * (see kmem_cache_init())
-	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
-	 * is statically defined, so we reserve the max number of nodes.
-	 */
-	struct kmem_list3 *nodelists[MAX_NUMNODES];
-	/*
-	 * Do not add fields after nodelists[]
-	 */
-};
-
 #define CFLGS_OFF_SLAB		(0x80000000UL)
 #define OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 
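The nodelists[] comment above points at a sizing trick in
kmem_cache_init(): the array is declared [MAX_NUMNODES] only so that the
statically defined cache_cache is large enough, and each cache's effective
size is then computed from nr_node_ids. A sketch of that computation,
paraphrased from mm/slab.c of this era rather than taken from this diff:

	#include <linux/stddef.h>

	/* Effective size of struct kmem_cache when only nr_node_ids
	 * nodes exist; valid because nodelists[] is the last member. */
	static size_t kmem_cache_effective_size(int nr_node_ids)
	{
		return offsetof(struct kmem_cache, nodelists) +
		       nr_node_ids * sizeof(struct kmem_list3 *);
	}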