Commit 8fdd6c6d authored by Ingo Molnar, committed by Linus Torvalds
Browse files

[PATCH] lightweight robust futexes: x86_64



x86_64: add the futex_atomic_cmpxchg_inuser() assembly implementation, and
wire up the new syscalls.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Acked-by: Ulrich Drepper <drepper@redhat.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent dfd4e3ec
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -688,6 +688,8 @@ ia32_sys_call_table:
	.quad sys_ni_syscall		/* pselect6 for now */
	.quad sys_ni_syscall		/* ppoll for now */
	.quad sys_unshare		/* 310 */
	.quad compat_sys_set_robust_list
	.quad compat_sys_get_robust_list
ia32_syscall_end:		
	.rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
		.quad ni_syscall
+22 −1
Original line number Diff line number Diff line
@@ -97,7 +97,28 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
/*
 * Atomic compare-and-exchange on a 32-bit value in user space:
 * if *uaddr == oldval, store newval into *uaddr.
 *
 * Returns the value previously found at *uaddr (the caller compares it
 * with oldval to learn whether the exchange took place), or -EFAULT if
 * the user address is not writable or the access faults.
 */
static inline int
futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
{
	/* Validate the user pointer before touching it. */
	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	__asm__ __volatile__(
		/* cmpxchgl implicitly compares against %eax ("=a"/"0" = oldval). */
		"1:	" LOCK_PREFIX "cmpxchgl %3, %1		\n"

		/* Fault fixup: load -EFAULT into the result and resume at 2:. */
		"2:	.section .fixup, \"ax\"			\n"
		"3:	mov     %2, %0				\n"
		"	jmp     2b				\n"
		"	.previous				\n"

		/* Exception table entry: a fault at 1: jumps to the fixup at 3:. */
		"	.section __ex_table, \"a\"		\n"
		"	.align  8				\n"
		"	.quad   1b,3b				\n"
		"	.previous				\n"

		: "=a" (oldval), "=m" (*uaddr)
		: "i" (-EFAULT), "r" (newval), "0" (oldval)
		: "memory"
	);

	return oldval;
}

#endif
+5 −1
Original line number Diff line number Diff line
@@ -605,8 +605,12 @@ __SYSCALL(__NR_pselect6, sys_ni_syscall) /* for now */
__SYSCALL(__NR_ppoll,	sys_ni_syscall)		/* for now */
#define __NR_unshare		272
__SYSCALL(__NR_unshare,	sys_unshare)
#define __NR_set_robust_list	273
__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
#define __NR_get_robust_list	274
__SYSCALL(__NR_get_robust_list, sys_get_robust_list)

/* Highest assigned syscall number — keep in sync with the last entry above. */
#define __NR_syscall_max __NR_get_robust_list

#ifndef __NO_STUBS