Commit 3337cb5a authored by Andrew Murray, committed by Will Deacon

arm64: avoid using hard-coded registers for LSE atomics

Now that we have removed the out-of-line ll/sc atomics, we can give
the compiler the freedom to choose its own register allocation.

Remove the hard-coded use of x30 in favour of a local temporary, so
that the scratch register is allocated by the compiler.
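
To illustrate outside the kernel macros: below is a minimal user-space
sketch of the new pattern, assuming an AArch64 toolchain with LSE
atomics enabled (-march=armv8.1-a). The function name lse_add_return
and the fully-ordered ldaddal variant are illustrative choices, not
kernel API.

#include <stdint.h>

/*
 * The old code clobbered the fixed register x30; here a "tmp" output
 * operand with an early-clobber constraint ("=&r") lets the compiler
 * pick any free register for the value loaded from memory.
 */
static inline int lse_add_return(int i, int *counter)
{
	uint32_t tmp;

	asm volatile(
	/* atomically add i to *counter; the old value lands in tmp */
	"	ldaddal	%w[i], %w[tmp], %[v]\n"
	/* return value = old value + i */
	"	add	%w[i], %w[i], %w[tmp]"
	: [i] "+r" (i), [v] "+Q" (*counter), [tmp] "=&r" (tmp)
	:
	: "memory");

	return i;
}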

Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent addfc386
arch/arm64/include/asm/atomic_lse.h  +41 −29
@@ -55,12 +55,14 @@ ATOMIC_FETCH_OPS(add, ldadd)
 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
 static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
 {									\
+	u32 tmp;							\
+									\
 	asm volatile(							\
-	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
-	"	add	%w[i], %w[i], w30"				\
-	: [i] "+r" (i), [v] "+Q" (v->counter)				\
+	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
+	"	add	%w[i], %w[i], %w[tmp]"				\
+	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
 	: "r" (v)							\
-	: "x30", ##cl);							\
+	: cl);								\
 									\
 	return i;							\
 }
@@ -113,13 +115,15 @@ static inline void __lse_atomic_sub(int i, atomic_t *v)
 #define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
 static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
 {									\
+	u32 tmp;							\
+									\
 	asm volatile(							\
 	"	neg	%w[i], %w[i]\n"					\
-	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
-	"	add	%w[i], %w[i], w30"				\
-	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
+	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
+	"	add	%w[i], %w[i], %w[tmp]"				\
+	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
 	: "r" (v)							\
-	: "x30", ##cl);							\
+	: cl);								\
 									\
 	return i;							\
 }
@@ -196,12 +200,14 @@ ATOMIC64_FETCH_OPS(add, ldadd)
 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)			\
 static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
 {									\
+	unsigned long tmp;						\
+									\
 	asm volatile(							\
-	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
-	"	add	%[i], %[i], x30"				\
-	: [i] "+r" (i), [v] "+Q" (v->counter)				\
+	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
+	"	add	%[i], %[i], %x[tmp]"				\
+	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
 	: "r" (v)							\
-	: "x30", ##cl);							\
+	: cl);								\
 									\
 	return i;							\
 }
@@ -254,13 +260,15 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)			\
 static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
 {									\
+	unsigned long tmp;						\
+									\
 	asm volatile(							\
 	"	neg	%[i], %[i]\n"					\
-	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
-	"	add	%[i], %[i], x30"				\
-	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
+	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
+	"	add	%[i], %[i], %x[tmp]"				\
+	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
 	: "r" (v)							\
-	: "x30", ##cl);							\
+	: cl);								\
 									\
 	return i;							\
 }
@@ -294,18 +302,20 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory")

 static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
 {
+	unsigned long tmp;
+
 	asm volatile(
-	"1:	ldr	x30, %[v]\n"
-	"	subs	%[ret], x30, #1\n"
+	"1:	ldr	%x[tmp], %[v]\n"
+	"	subs	%[ret], %x[tmp], #1\n"
 	"	b.lt	2f\n"
-	"	casal	x30, %[ret], %[v]\n"
-	"	sub	x30, x30, #1\n"
-	"	sub	x30, x30, %[ret]\n"
-	"	cbnz	x30, 1b\n"
+	"	casal	%x[tmp], %[ret], %[v]\n"
+	"	sub	%x[tmp], %x[tmp], #1\n"
+	"	sub	%x[tmp], %x[tmp], %[ret]\n"
+	"	cbnz	%x[tmp], 1b\n"
 	"2:"
-	: [ret] "+&r" (v), [v] "+Q" (v->counter)
+	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
 	:
-	: "x30", "cc", "memory");
+	: "cc", "memory");
 
 	return (long)v;
 }
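
For reference, the loop above implements the following logic; this is
a hedged C sketch using GCC's __atomic builtins rather than the
kernel's casal-based asm (seq-cst stands in for casal's
acquire+release ordering, and dec_if_positive is an illustrative
name):

#include <stdint.h>

/* Decrement *v only if the result stays non-negative; return the
 * would-be new value either way, so a negative result tells the
 * caller that no decrement happened. */
static inline int64_t dec_if_positive(int64_t *v)
{
	int64_t old = __atomic_load_n(v, __ATOMIC_RELAXED);

	while (old - 1 >= 0) {
		/* like casal: succeeds only if *v still equals old;
		 * on failure, old is refreshed and we retry */
		if (__atomic_compare_exchange_n(v, &old, old - 1, false,
						__ATOMIC_SEQ_CST,
						__ATOMIC_RELAXED))
			break;
	}
	return old - 1;
}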
@@ -318,14 +328,16 @@ static inline u##sz __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
 	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
 	register u##sz x1 asm ("x1") = old;				\
 	register u##sz x2 asm ("x2") = new;				\
+	unsigned long tmp;						\
 									\
 	asm volatile(							\
-	"	mov	" #w "30, %" #w "[old]\n"			\
-	"	cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n"	\
-	"	mov	%" #w "[ret], " #w "30"				\
-	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
+	"	mov	%" #w "[tmp], %" #w "[old]\n"			\
+	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
+	"	mov	%" #w "[ret], %" #w "[tmp]"			\
+	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr),		\
+	  [tmp] "=&r" (tmp)						\
 	: [old] "r" (x1), [new] "r" (x2)				\
-	: "x30", ##cl);							\
+	: cl);								\
 									\
 	return x0;							\
 }
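
The same substitution applies to the cmpxchg case. Below is a
user-space sketch of the new cas sequence, again assuming
-march=armv8.1-a and the fully-ordered casal variant; the kernel macro
additionally parameterizes operand width and memory ordering, and
keeps ptr/old/new pinned to x0/x1/x2. The name lse_cmpxchg32 is
illustrative, not kernel API.

#include <stdint.h>

static inline uint32_t lse_cmpxchg32(uint32_t *ptr, uint32_t old,
				     uint32_t new)
{
	uint32_t tmp;

	asm volatile(
	/* cas expects the compare value in the register it updates,
	 * so seed tmp with old instead of clobbering a fixed reg */
	"	mov	%w[tmp], %w[old]\n"
	/* on success *ptr becomes new; tmp always ends up holding
	 * the value observed at *ptr */
	"	casal	%w[tmp], %w[new], %[v]\n"
	: [tmp] "=&r" (tmp), [v] "+Q" (*ptr)
	: [old] "r" (old), [new] "r" (new)
	: "memory");

	return tmp;	/* equals old iff the exchange succeeded */
}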