mm/slub.c: branch optimization in free slowpath
The two conditions are mutually exclusive, and gcc will optimise them into an if-else-like pattern. Given that the majority of the free slowpath is the free_frozen case, let's provide a hint to the compiler.

Tests (perf bench sched messaging -g 20 -l 400000, executed 10 times after reboot) were done; summarized results:

             un-patched    patched
  max.          192.316    189.851
  min.          187.267    186.252
  avg.          189.154    188.086
  stdev.          1.37       0.99

Signed-off-by: Abel Wu <wuyun.wu@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Hewenliang <hewenliang4@huawei.com>
Cc: Hu Shiyuan <hushiyuan@huawei.com>
Link: http://lkml.kernel.org/r/20200813101812.1617-1-wuyun.wu@huawei.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
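For illustration only (not part of the patch): a minimal userspace C sketch of the pattern the commit relies on, i.e. folding two mutually exclusive checks into an if/else chain and marking the dominant side with a branch hint. The kernel's likely()/unlikely() map to __builtin_expect(); the function and counter names below are made up for the example.

/*
 * Standalone sketch of the branch-hint pattern used by the patch.
 * likely() tells gcc which side is the hot path so it lays that code
 * out on the fall-through path; the else-if makes the mutual exclusion
 * of the two conditions explicit. Hypothetical names throughout.
 */
#include <stdio.h>
#include <stdbool.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static unsigned long free_frozen_hits, cpu_partial_frees;

static void slab_free_hint(bool was_frozen, bool now_frozen)
{
	if (likely(was_frozen)) {
		/* hot path: object freed to an already-frozen slab */
		free_frozen_hits++;
	} else if (now_frozen) {
		/* cold path: this free itself froze the slab */
		cpu_partial_frees++;
	}
}

int main(void)
{
	/* mostly-hot workload, mirroring the commit's assumption */
	for (int i = 0; i < 1000; i++)
		slab_free_hint(i % 100 != 0, i % 100 == 0);

	printf("FREE_FROZEN: %lu, CPU_PARTIAL_FREE: %lu\n",
	       free_frozen_hits, cpu_partial_frees);
	return 0;
}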
parent d7cff4ded8
commit c270cf3041

 mm/slub.c (23 lines changed)
@@ -3019,20 +3019,21 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	if (likely(!n)) {
 
-		/*
-		 * If we just froze the page then put it onto the
-		 * per cpu partial list.
-		 */
-		if (new.frozen && !was_frozen) {
+		if (likely(was_frozen)) {
+			/*
+			 * The list lock was not taken therefore no list
+			 * activity can be necessary.
+			 */
+			stat(s, FREE_FROZEN);
+		} else if (new.frozen) {
+			/*
+			 * If we just froze the page then put it onto the
+			 * per cpu partial list.
+			 */
 			put_cpu_partial(s, page, 1);
 			stat(s, CPU_PARTIAL_FREE);
 		}
-		/*
-		 * The list lock was not taken therefore no list
-		 * activity can be necessary.
-		 */
-		if (was_frozen)
-			stat(s, FREE_FROZEN);
+
 		return;
 	}
 