mm/slab: do not change cache size if debug pagealloc isn't possible
We can fail to set up an off-slab cache in some conditions. Even in this case, debug pagealloc increases the cache size to PAGE_SIZE in advance, which is wasteful because debug pagealloc cannot work for the cache when it is not off-slab. To improve this situation, this patch first checks whether the cache, with its increased size, is suitable for off-slab management. It only increases the cache size when the cache is suitable for off-slab, so the possible waste is removed. Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Jesper Dangaard Brouer <brouer@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
158e319bba
commit
f3a3c320d5
15
mm/slab.c
15
mm/slab.c
|
@@ -2206,10 +2206,17 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	 */
 	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
 	    !slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
-	    size >= 256 && cachep->object_size > cache_line_size() &&
-	    size < PAGE_SIZE) {
-		cachep->obj_offset += PAGE_SIZE - size;
-		size = PAGE_SIZE;
+	    size >= 256 && cachep->object_size > cache_line_size()) {
+		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
+			size_t tmp_size = ALIGN(size, PAGE_SIZE);
+
+			if (set_off_slab_cache(cachep, tmp_size, flags)) {
+				flags |= CFLGS_OFF_SLAB;
+				cachep->obj_offset += tmp_size - size;
+				size = tmp_size;
+				goto done;
+			}
+		}
 	}
 #endif
|
Loading…
Reference in New Issue