Merge branch 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm
* 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm:
  slub: fix typo in Documentation/vm/slub.txt
  slab: NUMA slab allocator migration bugfix
  slub: Do not cross cacheline boundaries for very small objects
  slab - use angle brackets for include of kmalloc_sizes.h
  slab numa fallback logic: Do not pass unfiltered flags to page allocator
  slub statistics: Fix check for DEACTIVATE_REMOTE_FREES
commit bb799dcadd
Documentation/vm/slub.txt

@@ -50,14 +50,14 @@ F.e. in order to boot just with sanity checks and red zoning one would specify:
 
 Trying to find an issue in the dentry cache? Try
 
-slub_debug=,dentry_cache
+slub_debug=,dentry
 
 to only enable debugging on the dentry cache.
 
 Red zoning and tracking may realign the slab. We can just apply sanity checks
 to the dentry cache with
 
-slub_debug=F,dentry_cache
+slub_debug=F,dentry
 
 In case you forgot to enable debugging on the kernel command line: It is
 possible to enable debugging manually when the kernel is up. Look at the
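Note on usage (a sketch based on the flag letters this file documents, not
part of the commit itself): the option's general form is
slub_debug=<flags>,<cache name>, where F enables sanity checks, Z red zoning,
and P poisoning. So, for example, a boot line such as

    slub_debug=FZP,dentry

would apply all three to the dentry cache only, while an empty flag list, as
in slub_debug=,dentry above, turns on the full set of debug options for that
cache.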
include/linux/slab_def.h

@@ -41,7 +41,7 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 		{
 			extern void __you_cannot_kmalloc_that_much(void);
@@ -75,7 +75,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 		{
 			extern void __you_cannot_kmalloc_that_much(void);
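The hunks above only change how kmalloc_sizes.h is pulled in; the underlying
X-macro pattern is easiest to see in a standalone sketch. The following is a
minimal model, not the kernel's actual code: KMALLOC_SIZES here stands in for
the real header's list of CACHE(<size>) entries.

/* sketch.c: each includer defines CACHE, expands the size list in place,
 * and undefines it again; the list itself lives in one shared header. */
#include <stdio.h>

#define KMALLOC_SIZES CACHE(32) CACHE(64) CACHE(128) CACHE(256)

static int size_to_index(size_t size)
{
	int i = 0;
#define CACHE(x) if (size <= x) return i; else i++;
	KMALLOC_SIZES		/* the kernel #includes the header here */
#undef CACHE
	return -1;		/* larger than any listed cache */
}

int main(void)
{
	/* 100 bytes falls into the 128-byte cache, index 2 */
	printf("index for 100 bytes: %d\n", size_to_index(100));
	return 0;
}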
 mm/slab.c | 11
@@ -333,7 +333,7 @@ static __always_inline int index_of(const size_t size)
 			return i; \
 		else \
 			i++;
-#include "linux/kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 		__bad_size();
 	} else
@@ -2964,11 +2964,10 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	struct array_cache *ac;
 	int node;
 
-	node = numa_node_id();
-
-	check_irq_off();
-	ac = cpu_cache_get(cachep);
 retry:
+	check_irq_off();
+	node = numa_node_id();
+	ac = cpu_cache_get(cachep);
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
 		/*
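The hunk above fixes a migration race: on the way back to retry the
allocator can drop locks and re-enable interrupts, at which point the task
may be migrated to another CPU or NUMA node, so node and ac must be re-read
after the retry: label rather than cached before it. A standalone userspace
analogue of the general hazard (assuming Linux and glibc's sched_getcpu();
not kernel code):

/* sketch.c: CPU-derived state must be re-read after any point where the
 * scheduler could have moved the task. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* analogous to reading numa_node_id() once, above the retry: label */
	int cpu = sched_getcpu();

	sleep(1);	/* any blocking point: the scheduler may move us */

	/* the fix re-reads per-CPU state after each potential migration */
	if (cpu != sched_getcpu())
		printf("migrated: cached value %d is stale\n", cpu);
	else
		printf("still on cpu %d (migration possible, not guaranteed)\n", cpu);
	return 0;
}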
@@ -3280,7 +3279,7 @@ retry:
 	if (local_flags & __GFP_WAIT)
 		local_irq_enable();
 	kmem_flagcheck(cache, flags);
-	obj = kmem_getpages(cache, flags, -1);
+	obj = kmem_getpages(cache, local_flags, -1);
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
 	if (obj) {
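The change above hands the pre-filtered local_flags to the page allocator
instead of the raw flags, so slab-internal bits never leak down a layer. The
pattern generalizes; here is a standalone sketch with made-up mask values
(these are not the kernel's GFP definitions):

/* sketch.c: bits private to an upper layer are masked off before the
 * request is forwarded to a lower layer that does not understand them. */
#include <stdio.h>

#define GFP_WAIT	0x01u	/* understood by the page allocator */
#define GFP_HIGHMEM	0x02u	/* understood by the page allocator */
#define SLAB_INTERNAL	0x80u	/* slab-private bit; must never leak down */

#define LOWER_LAYER_MASK (GFP_WAIT | GFP_HIGHMEM)

static void get_pages(unsigned int flags)
{
	/* a real page allocator could misbehave on unknown bits */
	printf("page allocator sees flags 0x%02x\n", flags);
}

int main(void)
{
	unsigned int flags = GFP_WAIT | SLAB_INTERNAL;
	unsigned int local_flags = flags & LOWER_LAYER_MASK;	/* the fix */

	get_pages(local_flags);	/* prints 0x01, not 0x81 */
	return 0;
}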
 mm/slub.c | 13
@@ -1368,7 +1368,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	struct page *page = c->page;
 	int tail = 1;
 
-	if (c->freelist)
+	if (page->freelist)
 		stat(c, DEACTIVATE_REMOTE_FREES);
 	/*
 	 * Merge cpu freelist into slab freelist. Typically we get here
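Context for the one-liner above: a per-CPU slab has two freelists, the
lockless CPU-local list (c->freelist) and the slab's own list
(page->freelist), which is where frees arriving from other CPUs land. The
statistic is meant to count deactivations that observed such remote frees,
so it must test page->freelist. A standalone model (not kernel code):

/* sketch.c: why the statistic keys off the slab's own freelist. */
#include <stdio.h>
#include <stddef.h>

struct page_s { void *freelist; };	/* slab list: remote CPUs free here */
struct cpu_s { void *freelist; struct page_s *page; };	/* local fast path */

static int deactivate_remote_frees;	/* stands in for the SLUB counter */

static void deactivate(struct cpu_s *c)
{
	/* page->freelist is the evidence that a remote free happened;
	 * c->freelist reflects only local state and fired the counter on
	 * the wrong condition. */
	if (c->page->freelist)
		deactivate_remote_frees++;
	/* ... merge the cpu freelist into the slab freelist here ... */
}

int main(void)
{
	int obj;
	struct page_s page = { .freelist = &obj };	/* remote free queued */
	struct cpu_s c = { .freelist = NULL, .page = &page };

	deactivate(&c);
	printf("DEACTIVATE_REMOTE_FREES = %d\n", deactivate_remote_frees);
	return 0;
}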
@@ -1856,12 +1856,15 @@ static unsigned long calculate_alignment(unsigned long flags,
 	 * The hardware cache alignment cannot override the specified
 	 * alignment though. If that is greater then use it.
 	 */
-	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > cache_line_size() / 2)
-		return max_t(unsigned long, align, cache_line_size());
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}
 
 	if (align < ARCH_SLAB_MINALIGN)
-		return ARCH_SLAB_MINALIGN;
+		align = ARCH_SLAB_MINALIGN;
 
 	return ALIGN(align, sizeof(void *));
 }
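A worked example of the new loop above, assuming a 64-byte cache line (the
kernel queries cache_line_size() at runtime): the old code only
cache-line-aligned objects larger than half a line, leaving very small
objects free to straddle a line boundary; the new loop instead gives each
object the largest power-of-two fraction of the line it does not fit in half
of, so no object ever crosses a boundary. A standalone sketch:

/* sketch.c: mirrors the new SLAB_HWCACHE_ALIGN loop, outside the kernel. */
#include <stdio.h>

static unsigned long hwcache_align(unsigned long size, unsigned long line)
{
	unsigned long ralign = line;

	/* halve the alignment while the object still fits in half of it */
	while (size <= ralign / 2)
		ralign /= 2;
	return ralign;
}

int main(void)
{
	/* 12-byte objects: 16-byte alignment, four per 64-byte line,
	 * none straddling a boundary */
	printf("%lu\n", hwcache_align(12, 64));
	/* 40-byte objects: larger than half a line, full 64 as before */
	printf("%lu\n", hwcache_align(40, 64));
	return 0;
}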