[PATCH] lightweight robust futexes: x86_64

x86_64: add the futex_atomic_cmpxchg_inuser() assembly implementation, and
wire up the new syscalls.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Acked-by: Ulrich Drepper <drepper@redhat.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Ingo Molnar 2006-03-27 01:16:26 -08:00 committed by Linus Torvalds
parent dfd4e3ec24
commit 8fdd6c6df7
3 changed files with 29 additions and 2 deletions

View File

@ -688,6 +688,8 @@ ia32_sys_call_table:
	.quad sys_ni_syscall		/* pselect6 for now */
	.quad sys_ni_syscall		/* ppoll for now */
	.quad sys_unshare		/* 310 */
	.quad compat_sys_set_robust_list
	.quad compat_sys_get_robust_list
ia32_syscall_end:
	.rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
		.quad ni_syscall

View File

@ -97,7 +97,28 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
/*
 * Atomically compare-and-exchange the 32-bit futex word at the user
 * address @uaddr: if it still holds @oldval, replace it with @newval.
 *
 * Returns the value actually found at *uaddr (== @oldval on success),
 * or -EFAULT if the user address is not writable / faults.
 *
 * NOTE(review): the fault path returns -EFAULT in-band through the same
 * int return as the futex value — callers must treat -EFAULT as special.
 */
static inline int
futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
{
	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	__asm__ __volatile__(
		/* cmpxchg compares EAX ("0" (oldval)) with *uaddr and, if
		 * equal, stores newval; either way EAX ends up holding the
		 * value that was in memory ("=a" (oldval)). */
		"1:	" LOCK_PREFIX "cmpxchgl %3, %1		\n"

		/* Fault fixup: if the access at 1: faults, the exception
		 * table sends us to 3:, which loads -EFAULT ("i" (-EFAULT))
		 * into the result and resumes at 2: (just past the insn). */
		"2:	.section .fixup, \"ax\"			\n"
		"3:	mov	%2, %0				\n"
		"	jmp	2b				\n"
		"	.previous				\n"

		/* __ex_table entry: (faulting insn addr, fixup addr). */
		"	.section __ex_table, \"a\"		\n"
		"	.align	8				\n"
		"	.quad	1b,3b				\n"
		"	.previous				\n"

		: "=a" (oldval), "=m" (*uaddr)
		: "i" (-EFAULT), "r" (newval), "0" (oldval)
		: "memory"
	);

	return oldval;
}
#endif

View File

@ -605,8 +605,12 @@ __SYSCALL(__NR_pselect6, sys_ni_syscall) /* for now */
__SYSCALL(__NR_ppoll, sys_ni_syscall)	/* for now */
#define __NR_unshare		272
__SYSCALL(__NR_unshare, sys_unshare)
#define __NR_set_robust_list	273
__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
#define __NR_get_robust_list	274
__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
#define __NR_syscall_max __NR_get_robust_list	/* was __NR_unshare before this patch */
#ifndef __NO_STUBS