From 00d5551cc4eec0fc39c3871c25c613553acfb866 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 16 Jul 2018 12:30:07 +0100
Subject: [PATCH] locking/atomics/x86: Reduce arch_cmpxchg64*() instrumentation

Currently x86's arch_cmpxchg64() and arch_cmpxchg64_local() are
instrumented twice, as they call into instrumented atomics rather than
their arch_ equivalents.

A call to cmpxchg64() results in:

  cmpxchg64()
    kasan_check_write()
    arch_cmpxchg64()
      cmpxchg()
        kasan_check_write()
        arch_cmpxchg()

Let's fix this up and call the arch_ equivalents, resulting in:

  cmpxchg64()
    kasan_check_write()
    arch_cmpxchg64()
      arch_cmpxchg()

Signed-off-by: Mark Rutland
Acked-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Cc: Boqun Feng
Cc: Dmitry Vyukov
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: andy.shevchenko@gmail.com
Cc: arnd@arndb.de
Cc: aryabinin@virtuozzo.com
Cc: catalin.marinas@arm.com
Cc: glider@google.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: parri.andrea@gmail.com
Cc: peter@hurleysoftware.com
Link: http://lkml.kernel.org/r/20180716113017.3909-3-mark.rutland@arm.com
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/cmpxchg_64.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index bfca3b346c74..072e5459fe2f 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -10,13 +10,13 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
 #define arch_cmpxchg64(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg((ptr), (o), (n));					\
+	arch_cmpxchg((ptr), (o), (n));					\
 })
 
 #define arch_cmpxchg64_local(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg_local((ptr), (o), (n));					\
+	arch_cmpxchg_local((ptr), (o), (n));				\
 })
 
 #define system_has_cmpxchg_double()	boot_cpu_has(X86_FEATURE_CX16)
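
For reference, the double instrumentation arises because the generic
instrumented wrappers perform the KASAN check themselves and then defer
to the raw arch_ implementation. A minimal sketch of that wrapper
shape, assuming the include/asm-generic/atomic-instrumented.h style of
this era (the ___ptr temporary is illustrative, not the exact name):

	#define cmpxchg64(ptr, old, new)				\
	({								\
		__typeof__(ptr) ___ptr = (ptr);				\
		/* instrumentation: report the write to KASAN once */	\
		kasan_check_write(___ptr, sizeof(*___ptr));		\
		/* then defer to the raw arch implementation */		\
		arch_cmpxchg64(___ptr, (old), (new));			\
	})

While arch_cmpxchg64() expanded to the instrumented cmpxchg(), the
kasan_check_write() above ran at each wrapper level; with the arch_
equivalents called directly, each cmpxchg64() performs a single check.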