SLUB: Optimize slab_free() debug check
This patch optimizes the slab_free() debug check to use "c->node !=
NUMA_NO_NODE" instead of "c->node >= 0" because the former generates
smaller code on x86-64:

  Before:

    4736: 48 39 70 08          cmp    %rsi,0x8(%rax)
    473a: 75 26                jne    4762 <kfree+0xa2>
    473c: 44 8b 48 10          mov    0x10(%rax),%r9d
    4740: 45 85 c9             test   %r9d,%r9d
    4743: 78 1d                js     4762 <kfree+0xa2>

  After:

    4736: 48 39 70 08          cmp    %rsi,0x8(%rax)
    473a: 75 23                jne    475f <kfree+0x9f>
    473c: 83 78 10 ff          cmpl   $0xffffffffffffffff,0x10(%rax)
    4740: 74 1d                je     475f <kfree+0x9f>

This patch also cleans up __slab_alloc() to use NUMA_NO_NODE instead of
"-1" for enabling debugging of a per-CPU cache.

Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
commit 15b7c51420
parent 5d1f57e4d3
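The size win comes from how the two predicates encode on x86-64: the signed
"is this non-negative?" test was compiled above as a separate load into %r9d
followed by test/js, whereas comparing against the -1 sentinel lets the
compiler emit a single cmpl with a sign-extended 8-bit immediate directly
against the memory operand. A minimal user-space sketch for reproducing this
outside the kernel is shown below; the struct and function names are invented
for illustration and are not taken from slub.c, and the exact output depends
on the compiler version.

/* check.c - build with: gcc -O2 -c check.c && objdump -d check.o (x86-64) */

#define NUMA_NO_NODE (-1)       /* same sentinel value the kernel uses */

/* Illustrative stand-in for the relevant fields of struct kmem_cache_cpu. */
struct cache {
        void *page;
        int node;
};

/* Old form: signed range check ("a real node id is >= 0"). */
int fast_path_signed(const struct cache *c, const void *page)
{
        return page == c->page && c->node >= 0;
}

/* New form: explicit comparison against the NUMA_NO_NODE sentinel. */
int fast_path_sentinel(const struct cache *c, const void *page)
{
        return page == c->page && c->node != NUMA_NO_NODE;
}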
mm/slub.c

@@ -1718,7 +1718,7 @@ debug:
 
 	c->page->inuse++;
 	c->page->freelist = get_freepointer(s, object);
-	c->node = -1;
+	c->node = NUMA_NO_NODE;
 	goto unlock_out;
 }
 
@@ -1895,7 +1895,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 
 	slab_free_hook_irq(s, x);
 
-	if (likely(page == c->page && c->node >= 0)) {
+	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
 		set_freepointer(s, object, c->freelist);
 		c->freelist = object;
 		stat(s, FREE_FASTPATH);
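Read together, the two hunks show how the sentinel drives the debug path: the
debug branch of __slab_alloc() stores NUMA_NO_NODE in the per-CPU cache, and
the slab_free() fast path only runs while c->node holds a real node id, so
objects allocated with debugging active are always freed through the slow path
where the debug checks live. A simplified, hypothetical model of that dispatch
(all names and the layout invented for illustration, no locking) might look
like this:

#include <stdbool.h>
#include <stdio.h>

#define NUMA_NO_NODE (-1)

/* Toy stand-in for the per-CPU cache state touched by the hunks above. */
struct cpu_cache {
        void *page;
        void *freelist;
        int node;       /* NUMA_NO_NODE while debug checks must run on free */
};

/* Debug allocation path: poison the node id so frees take the slow path. */
void debug_alloc_done(struct cpu_cache *c)
{
        c->node = NUMA_NO_NODE;
}

/* Returns true when the free fast path may be used. */
bool free_fast_path_ok(const struct cpu_cache *c, const void *page)
{
        return page == c->page && c->node != NUMA_NO_NODE;
}

int main(void)
{
        char page;
        struct cpu_cache c = { .page = &page, .freelist = NULL, .node = 0 };

        printf("fast path ok: %d\n", free_fast_path_ok(&c, &page)); /* 1 */
        debug_alloc_done(&c);
        printf("fast path ok: %d\n", free_fast_path_ok(&c, &page)); /* 0 */
        return 0;
}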