Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB changes from Pekka Enberg:
 "The patches from Joonsoo Kim switch mm/slab.c to use 'struct page' for
  slab internals similar to mm/slub.c. This reduces memory usage and
  improves performance:

    https://lkml.org/lkml/2013/10/16/155

  Rest of the changes are bug fixes from various people."

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (21 commits)
  mm, slub: fix the typo in mm/slub.c
  mm, slub: fix the typo in include/linux/slub_def.h
  slub: Handle NULL parameter in kmem_cache_flags
  slab: replace non-existing 'struct freelist *' with 'void *'
  slab: fix to calm down kmemleak warning
  slub: proper kmemleak tracking if CONFIG_SLUB_DEBUG disabled
  slab: rename slab_bufctl to slab_freelist
  slab: remove useless statement for checking pfmemalloc
  slab: use struct page for slab management
  slab: replace free and inuse in struct slab with newly introduced active
  slab: remove SLAB_LIMIT
  slab: remove kmem_bufctl_t
  slab: change the management method of free objects of the slab
  slab: use __GFP_COMP flag for allocating slab pages
  slab: use well-defined macro, virt_to_slab()
  slab: overloading the RCU head over the LRU for RCU free
  slab: remove cachep in struct slab_rcu
  slab: remove nodeid in struct slab
  slab: remove colouroff in struct slab
  slab: change return type of kmem_getpages() to struct page
  ...
commit 24f971abbd
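For orientation, a rough sketch (inferred from the commit subjects above, not a literal excerpt from the tree) of where the old mm/slab.c 'struct slab' fields end up inside 'struct page' after this series:

/*
 * Approximate field mapping (assumption based on the shortlog):
 *
 *   old struct slab field          new home in struct page
 *   ---------------------------    ----------------------------------
 *   list                           lru (overloaded; rcu_head for RCU free)
 *   s_mem                          s_mem (union with mapping)
 *   free (free-object bookkeeping) freelist
 *   inuse                          active
 *   colouroff, nodeid, cachep      dropped; derivable from the page itself
 */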
include/linux/mm_types.h
@@ -44,18 +44,22 @@ struct page {
 	/* First double word block */
 	unsigned long flags;		/* Atomic flags, some possibly
 					 * updated asynchronously */
-	struct address_space *mapping;	/* If low bit clear, points to
-					 * inode address_space, or NULL.
-					 * If page mapped as anonymous
-					 * memory, low bit is set, and
-					 * it points to anon_vma object:
-					 * see PAGE_MAPPING_ANON below.
-					 */
+	union {
+		struct address_space *mapping;	/* If low bit clear, points to
+						 * inode address_space, or NULL.
+						 * If page mapped as anonymous
+						 * memory, low bit is set, and
+						 * it points to anon_vma object:
+						 * see PAGE_MAPPING_ANON below.
+						 */
+		void *s_mem;			/* slab first object */
+	};
+
 	/* Second double word */
 	struct {
 		union {
 			pgoff_t index;		/* Our offset within mapping. */
-			void *freelist;		/* slub/slob first free object */
+			void *freelist;		/* sl[aou]b first free object */
 			bool pfmemalloc;	/* If set by the page allocator,
 						 * ALLOC_NO_WATERMARKS was set
 						 * and the low watermark was not
@@ -111,6 +115,7 @@ struct page {
 				};
 				atomic_t _count;	/* Usage count, see below. */
 			};
+			unsigned int active;	/* SLAB */
 		};
 	};
@@ -132,6 +137,9 @@ struct page {

 		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
+		struct rcu_head rcu_head;	/* Used by SLAB
+						 * when destroying via RCU
+						 */
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
 		pgtable_t pmd_huge_pte; /* protected by page->ptl */
 #endif
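To make the effect concrete, a minimal sketch (assuming the post-series mm/slab.c layout, where a slab's objects sit contiguously starting at page->s_mem) of computing an object's address straight from struct page; index_to_obj mirrors the mm/slab.c helper this series rewrites in these terms:

static inline void *index_to_obj(struct kmem_cache *cachep,
				 const struct page *page, unsigned int idx)
{
	/* s_mem points at the first object in the slab page */
	return page->s_mem + cachep->size * idx;
}

With the metadata in struct page itself, no separate struct slab descriptor has to be allocated or looked up.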
include/linux/slab.h
@@ -53,7 +53,14 @@
  *  }
  *  rcu_read_unlock();
  *
- * See also the comment on struct slab_rcu in mm/slab.c.
+ * This is useful if we need to approach a kernel structure obliquely,
+ * from its address obtained without the usual locking. We can lock
+ * the structure to stabilize it and check it's still at the given address,
+ * only if we can be sure that the memory has not been meanwhile reused
+ * for some other kind of object (which our subsystem's lock might corrupt).
+ *
+ * rcu_read_lock before reading the address, then rcu_read_unlock after
+ * taking the spinlock within the structure expected at that address.
  */
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
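As a usage illustration of the pattern this comment describes, a hypothetical sketch (my_obj, my_cache and my_hash_lookup are invented names, not from the tree; my_cache is assumed to be created with SLAB_DESTROY_BY_RCU):

struct my_obj {
	spinlock_t lock;
	unsigned long key;
	/* ... payload ... */
};

static struct kmem_cache *my_cache;	/* created with SLAB_DESTROY_BY_RCU */

static struct my_obj *my_lookup(unsigned long key)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = my_hash_lookup(key);	/* lockless lookup, may race with free */
	if (obj) {
		/* The memory cannot have been reused for another type
		 * within the grace period, so taking the lock is safe... */
		spin_lock(&obj->lock);
		/* ...but the object may have been freed and reallocated
		 * as a different my_obj, so recheck its identity. */
		if (obj->key != key) {
			spin_unlock(&obj->lock);
			obj = NULL;
		}
	}
	rcu_read_unlock();
	return obj;			/* locked on success, else NULL */
}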
include/linux/slab_def.h
@@ -27,8 +27,8 @@ struct kmem_cache {

 	size_t colour;			/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *slabp_cache;
-	unsigned int slab_size;
+	struct kmem_cache *freelist_cache;
+	unsigned int freelist_size;

 	/* constructor func */
 	void (*ctor)(void *obj);
include/linux/slub_def.h
@@ -11,7 +11,7 @@
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
-	FREE_FASTPATH,		/* Free to cpu slub */
+	FREE_FASTPATH,		/* Free to cpu slab */
 	FREE_SLOWPATH,		/* Freeing not to cpu slab */
 	FREE_FROZEN,		/* Freeing to frozen slab */
 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
mm/slub.c
@@ -155,7 +155,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 /*
  * Maximum number of desirable partial slabs.
  * The existence of more partial slabs makes kmem_cache_shrink
- * sort the partial list by the number of objects in the.
+ * sort the partial list by the number of objects in use.
  */
 #define MAX_PARTIAL 10
@@ -933,6 +933,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  */
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 {
 	flags &= gfp_allowed_mask;
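The "no code at all" claim still holds because the kmemleak entry points themselves compile to empty inlines when kmemleak is off; roughly what include/linux/kmemleak.h provides with CONFIG_DEBUG_KMEMLEAK=n (paraphrased for illustration, not part of this diff):

static inline void kmemleak_alloc(const void *ptr, size_t size,
				  int min_count, gfp_t gfp)
{
}

static inline void kmemleak_free(const void *ptr)
{
}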
@@ -1217,8 +1227,8 @@ static unsigned long kmem_cache_flags(unsigned long object_size,
 	/*
 	 * Enable debugging if selected on the kernel commandline.
 	 */
-	if (slub_debug && (!slub_debug_slabs ||
-		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+	if (slub_debug && (!slub_debug_slabs || (name &&
+		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
 		flags |= slub_debug;

 	return flags;
@@ -1260,13 +1270,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}

+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 							{ return 0; }

 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-		void *object) {}
+		void *object)
+{
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+		flags & gfp_allowed_mask);
+}

-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+	kmemleak_free_recursive(x, s->flags);
+}

 #endif /* CONFIG_SLUB_DEBUG */
@@ -2829,8 +2856,8 @@ static struct kmem_cache *kmem_cache_node;
  * slab on the node for this slabcache. There are no concurrent accesses
  * possible.
  *
- * Note that this function only works on the kmalloc_node_cache
- * when allocating for the kmalloc_node_cache. This is used for bootstrapping
+ * Note that this function only works on the kmem_cache_node
+ * when allocating for the kmem_cache_node. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
  */
 static void early_kmem_cache_node_alloc(int node)
@@ -3272,7 +3299,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	if (page)
 		ptr = page_address(page);

-	kmemleak_alloc(ptr, size, 1, flags);
+	kmalloc_large_node_hook(ptr, size, flags);
 	return ptr;
 }
@@ -3336,7 +3363,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
-		kmemleak_free(x);
+		kfree_hook(x);
 		__free_memcg_kmem_pages(page, compound_order(page));
 		return;
 	}