Commit 16f18688 authored by Mark Rutland, committed by Ingo Molnar

locking/atomic, arm64: Use s64 for atomic64



As a step towards making the atomic64 API use consistent types treewide,
let's have the arm64 atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long, matching the generated headers.

As atomic64_read() depends on the generic definition of atomic64_t, this
still returns long. This will be converted in a subsequent patch.
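
For reference, a rough sketch of the generic pieces in question (simplified,
not the verbatim kernel headers; READ_ONCE() is modelled here as a plain
volatile read):

/* include/linux/types.h, 64-bit case (simplified): counter is still 'long' */
typedef struct {
	long counter;
} atomic64_t;

/* generic-style read helper: returns the counter's type, hence still 'long' */
static inline long arch_atomic64_read(const atomic64_t *v)
{
	return *(const volatile long *)&v->counter;
}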

Note that in arch_atomic64_dec_if_positive(), the x0 variable is left as
long, as this variable is also used to hold the pointer to the
atomic64_t.
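
As a trimmed illustration (based on the LSE variant further down; the asm body
is omitted and this is not the exact kernel code):

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	/*
	 * x0 first carries the pointer (cast to long) and is then reused by
	 * the asm for the s64 result, so the C-level variable stays 'long'.
	 */
	register long x0 asm ("x0") = (long)v;

	/* ... LL/SC or LSE asm decrements v->counter into x0 here ... */

	return x0;
}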

Otherwise, there should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aou@eecs.berkeley.edu
Cc: arnd@arndb.de
Cc: bp@alien8.de
Cc: davem@davemloft.net
Cc: fenghua.yu@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: ink@jurassic.park.msu.ru
Cc: jhogan@kernel.org
Cc: linux@armlinux.org.uk
Cc: mattst88@gmail.com
Cc: mpe@ellerman.id.au
Cc: palmer@sifive.com
Cc: paul.burton@mips.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: rth@twiddle.net
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-8-mark.rutland@arm.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ef4cdc09
arch/arm64/include/asm/atomic_ll_sc.h  +10 −10
@@ -133,9 +133,9 @@ ATOMIC_OPS(xor, eor)

#define ATOMIC64_OP(op, asm_op)						\
__LL_SC_INLINE void							\
-__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v))		\
+__LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v))		\
{									\
-	long result;							\
+	s64 result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "\n"				\
@@ -150,10 +150,10 @@ __LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v)) \
__LL_SC_EXPORT(arch_atomic64_##op);

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
-__LL_SC_INLINE long							\
-__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
+__LL_SC_INLINE s64							\
+__LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\
{									\
-	long result;							\
+	s64 result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "_return" #name "\n"		\
@@ -172,10 +172,10 @@ __LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
__LL_SC_EXPORT(arch_atomic64_##op##_return##name);

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)		\
-__LL_SC_INLINE long							\
-__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v))	\
+__LL_SC_INLINE s64							\
+__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v))	\
{									\
-	long result, val;						\
+	s64 result, val;						\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_fetch_" #op #name "\n"		\
@@ -225,10 +225,10 @@ ATOMIC64_OPS(xor, eor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

-__LL_SC_INLINE long
+__LL_SC_INLINE s64
__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
{
-	long result;
+	s64 result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
arch/arm64/include/asm/atomic_lse.h  +17 −17
@@ -224,9 +224,9 @@ ATOMIC_FETCH_OP_SUB( , al, "memory")

#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(arch_atomic64_##op)
#define ATOMIC64_OP(op, asm_op)						\
-static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
-	register long x0 asm ("x0") = i;				\
+	register s64 x0 asm ("x0") = i;					\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),	\
@@ -244,9 +244,9 @@ ATOMIC64_OP(add, stadd)
#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
-static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\
+static inline s64 arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v)	\
{									\
-	register long x0 asm ("x0") = i;				\
+	register s64 x0 asm ("x0") = i;					\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
@@ -276,9 +276,9 @@ ATOMIC64_FETCH_OPS(add, ldadd)
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
-static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\
+static inline s64 arch_atomic64_add_return##name(s64 i, atomic64_t *v)	\
{									\
-	register long x0 asm ("x0") = i;				\
+	register s64 x0 asm ("x0") = i;					\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
@@ -302,9 +302,9 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

-static inline void arch_atomic64_and(long i, atomic64_t *v)
+static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
-	register long x0 asm ("x0") = i;
+	register s64 x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
@@ -320,9 +320,9 @@ static inline void arch_atomic64_and(long i, atomic64_t *v)
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
-static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v)	\
+static inline s64 arch_atomic64_fetch_and##name(s64 i, atomic64_t *v)	\
{									\
-	register long x0 asm ("x0") = i;				\
+	register s64 x0 asm ("x0") = i;					\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
@@ -346,9 +346,9 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

-static inline void arch_atomic64_sub(long i, atomic64_t *v)
+static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
-	register long x0 asm ("x0") = i;
+	register s64 x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
@@ -364,9 +364,9 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
-static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\
+static inline s64 arch_atomic64_sub_return##name(s64 i, atomic64_t *v)	\
{									\
-	register long x0 asm ("x0") = i;				\
+	register s64 x0 asm ("x0") = i;					\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
@@ -392,9 +392,9 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)				\
-static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v)	\
+static inline s64 arch_atomic64_fetch_sub##name(s64 i, atomic64_t *v)	\
{									\
-	register long x0 asm ("x0") = i;				\
+	register s64 x0 asm ("x0") = i;					\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
@@ -418,7 +418,7 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB

-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;