Commit d84e28d2 authored by Mark Rutland, committed by Ingo Molnar

locking/atomic, ia64: Use s64 for atomic64



As a step towards making the atomic64 API use consistent types treewide,
let's have the ia64 atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long or __s64, matching the generated
headers.
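
For illustration, the pattern this patch applies (a sketch using the add
op; the real definitions are generated by the ATOMIC64_OP macro shown in
the hunks below):

	/* before: mixed long and __s64 */
	static __inline__ long
	ia64_atomic64_add (__s64 i, atomic64_t *v);

	/* after: s64 throughout, matching the generated headers */
	static __inline__ s64
	ia64_atomic64_add (s64 i, atomic64_t *v);

On ia64 both long and s64 are 64-bit types, so only the spelling of the
type changes.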

As atomic64_read() depends on the generic definition of atomic64_t, this
still returns long. This will be converted in a subsequent patch.
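
Concretely, the read path at this point in the series looks roughly like
the following (a sketch; the atomic64_t definition lives in the generic
headers, not in this file):

	typedef struct {
		long counter;	/* switched to s64 by a later patch in this series */
	} atomic64_t;

	#define atomic64_read(v)	READ_ONCE((v)->counter)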

Otherwise, there should be no functional change as a result of this
patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aou@eecs.berkeley.edu
Cc: arnd@arndb.de
Cc: bp@alien8.de
Cc: catalin.marinas@arm.com
Cc: davem@davemloft.net
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: ink@jurassic.park.msu.ru
Cc: jhogan@kernel.org
Cc: linux@armlinux.org.uk
Cc: mattst88@gmail.com
Cc: mpe@ellerman.id.au
Cc: palmer@sifive.com
Cc: paul.burton@mips.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: rth@twiddle.net
Cc: vgupta@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-9-mark.rutland@arm.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 16f18688
arch/ia64/include/asm/atomic.h: +10 −10
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -124,10 +124,10 @@ ATOMIC_FETCH_OP(xor, ^)
 #undef ATOMIC_OP
 
 #define ATOMIC64_OP(op, c_op)						\
-static __inline__ long							\
-ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
+static __inline__ s64							\
+ia64_atomic64_##op (s64 i, atomic64_t *v)				\
 {									\
-	__s64 old, new;							\
+	s64 old, new;							\
 	CMPXCHG_BUGCHECK_DECL						\
 									\
 	do {								\
@@ -139,10 +139,10 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v) \
 }
 
 #define ATOMIC64_FETCH_OP(op, c_op)					\
-static __inline__ long							\
-ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v)			\
+static __inline__ s64							\
+ia64_atomic64_fetch_##op (s64 i, atomic64_t *v)				\
 {									\
-	__s64 old, new;							\
+	s64 old, new;							\
 	CMPXCHG_BUGCHECK_DECL						\
 									\
 	do {								\
@@ -162,7 +162,7 @@ ATOMIC64_OPS(sub, -)
 
 #define atomic64_add_return(i,v)					\
 ({									\
-	long __ia64_aar_i = (i);					\
+	s64 __ia64_aar_i = (i);						\
 	__ia64_atomic_const(i)						\
 		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
 		: ia64_atomic64_add(__ia64_aar_i, v);			\
@@ -170,7 +170,7 @@ ATOMIC64_OPS(sub, -)
 
 #define atomic64_sub_return(i,v)					\
 ({									\
-	long __ia64_asr_i = (i);					\
+	s64 __ia64_asr_i = (i);						\
 	__ia64_atomic_const(i)						\
 		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
 		: ia64_atomic64_sub(__ia64_asr_i, v);			\
@@ -178,7 +178,7 @@ ATOMIC64_OPS(sub, -)
 
 #define atomic64_fetch_add(i,v)						\
 ({									\
-	long __ia64_aar_i = (i);					\
+	s64 __ia64_aar_i = (i);						\
 	__ia64_atomic_const(i)						\
 		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
 		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
@@ -186,7 +186,7 @@ ATOMIC64_OPS(sub, -)
 
 #define atomic64_fetch_sub(i,v)						\
 ({									\
-	long __ia64_asr_i = (i);					\
+	s64 __ia64_asr_i = (i);						\
 	__ia64_atomic_const(i)						\
 		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
 		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
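
For reference, after this patch ATOMIC64_OP(add, +) expands to roughly
the following (a sketch reconstructed from the hunks above; the cmpxchg
retry loop itself is untouched by this patch):

	static __inline__ s64
	ia64_atomic64_add (s64 i, atomic64_t *v)
	{
		s64 old, new;
		CMPXCHG_BUGCHECK_DECL

		do {
			CMPXCHG_BUGCHECK(v);
			old = atomic64_read(v);
			new = old + i;
		} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
		return new;
	}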