Commit af095dd6 authored by Peter Zijlstra, committed by Ingo Molnar

locking,arch,powerpc: Fold atomic_ops



Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.
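
The pattern, as a user-space sketch (not the kernel code: it stands the
GCC/Clang __atomic builtins in for the lwarx/stwcx. loops, and atomic_t
here is a local stand-in type) — one macro body per variant, stamped out
once per operation:

	#include <stdio.h>

	typedef struct { int counter; } atomic_t;

	/* Non-returning op: no ordering guarantees, like the kernel version. */
	#define ATOMIC_OP(op, c_op)						\
	static inline void atomic_##op(int a, atomic_t *v)			\
	{									\
		__atomic_fetch_##c_op(&v->counter, a, __ATOMIC_RELAXED);	\
	}

	/* Returning op: fully ordered, mirroring PPC_ATOMIC_ENTRY/EXIT_BARRIER. */
	#define ATOMIC_OP_RETURN(op, c_op)					\
	static inline int atomic_##op##_return(int a, atomic_t *v)		\
	{									\
		return __atomic_##c_op##_fetch(&v->counter, a, __ATOMIC_SEQ_CST); \
	}

	#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

	ATOMIC_OPS(add, add)	/* generates atomic_add(), atomic_add_return() */
	ATOMIC_OPS(sub, sub)	/* generates atomic_sub(), atomic_sub_return() */

	#undef ATOMIC_OPS
	#undef ATOMIC_OP_RETURN
	#undef ATOMIC_OP

	int main(void)
	{
		atomic_t v = { .counter = 10 };

		atomic_add(5, &v);				/* v.counter == 15 */
		printf("%d\n", atomic_sub_return(3, &v));	/* prints 12 */
		return 0;
	}

As in the patch below, only the _return forms carry full ordering; the
plain ops are deliberately unordered.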

Requires asm_op because PPC asm is weird :-)
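
That is: the only difference between atomic_add and atomic_sub is the
mnemonic (add vs. subf — and subf rD,rA,rB computes rB - rA, so the shared
operand order still yields t = t - a). The mnemonic lives inside an asm
string, so it has to be passed as a macro argument and spliced in by
stringizing it with #asm_op. For illustration, hand-expanding
ATOMIC_OP(sub, subf) from the first hunk below should give back the old
atomic_sub(), modulo string-literal splits:

	static __inline__ void atomic_sub(int a, atomic_t *v)
	{
		int t;

		__asm__ __volatile__(
	"1:	lwarx	%0,0,%3		# atomic_sub\n"
		"subf" " %0,%2,%0\n"	/* #asm_op stringized, then concatenated */
		PPC405_ERR77(0,%3)
	"	stwcx.	%0,0,%3 \n"
	"	bne-	1b\n"
		: "=&r" (t), "+m" (v->counter)
		: "r" (a), "r" (&v->counter)
		: "cc");
	}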

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/20140508135852.713980957@infradead.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 15e3f6d7
arch/powerpc/include/asm/atomic.h: +77 −121
@@ -26,76 +26,53 @@ static __inline__ void atomic_set(atomic_t *v, int i)
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, asm_op)					\
static __inline__ int atomic_##op##_return(int a, atomic_t *v)		\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
	PPC_ATOMIC_ENTRY_BARRIER					\
"1:	lwarx	%0,0,%2		# atomic_" #op "_return\n"		\
	#asm_op " %0,%1,%0\n"						\
	PPC405_ERR77(0,%2)						\
"	stwcx.	%0,0,%2 \n"						\
"	bne-	1b\n"							\
	PPC_ATOMIC_EXIT_BARRIER						\
	: "=&r" (t)							\
	: "r" (a), "r" (&v->counter)					\
	: "cc", "memory");						\
									\
	return t;							\
}

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;
#define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)

	return t;
}
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;
@@ -289,71 +266,50 @@ static __inline__ void atomic64_set(atomic64_t *v, long i)
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

static __inline__ void atomic64_add(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_add\n\
	add	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

static __inline__ long atomic64_add_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
	add	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ long atomic64_##op##_return(long a, atomic64_t *v)	\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
	PPC_ATOMIC_ENTRY_BARRIER					\
"1:	ldarx	%0,0,%2		# atomic64_" #op "_return\n"		\
	#asm_op " %0,%1,%0\n"						\
"	stdcx.	%0,0,%2 \n"						\
"	bne-	1b\n"							\
	PPC_ATOMIC_EXIT_BARRIER						\
	: "=&r" (t)							\
	: "r" (a), "r" (&v->counter)					\
	: "cc", "memory");						\
									\
	return t;							\
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)

static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_sub\n\
	subf	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
{
	long t;
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
	subf	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

	return t;
}
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_inc(atomic64_t *v)
{