Commit 16fbad08 authored by Mark Rutland, committed by Ingo Molnar

locking/atomic, arc: Use s64 for atomic64

As a step towards making the atomic64 API use consistent types treewide,
let's have the arc atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than u64, matching the generated headers.

Otherwise, there should be no functional change as a result of this
patch.
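
As a rough illustration of why the signed type is the natural fit (a
minimal user-space sketch of the assumed semantics, not the kernel
implementation; dec_if_positive_sketch() is a hypothetical stand-in):
atomic64_dec_if_positive() reports failure through a negative return
value, which only reads naturally when the underlying type is signed:

  #include <stdint.h>
  #include <stdio.h>

  typedef int64_t s64;  /* kernel-style alias, local to this sketch */

  /*
   * Sketch of atomic64_dec_if_positive() semantics, minus the
   * atomicity: compute v - 1, store it back only when the result
   * stays >= 0, and return the result either way so the caller can
   * test its sign.  With an unsigned counter the "negative" result
   * would wrap to a huge positive value instead.
   */
  static s64 dec_if_positive_sketch(s64 *v)
  {
          s64 val = *v - 1;

          if (val >= 0)
                  *v = val;
          return val;
  }

  int main(void)
  {
          s64 counter = 0;

          /* Prints "-1 0": the decrement fails, counter is unchanged. */
          printf("%lld %lld\n",
                 (long long)dec_if_positive_sketch(&counter),
                 (long long)counter);
          return 0;
  }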

Acked-by: Vineet Gupta <vgupta@synopsys.com>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aou@eecs.berkeley.edu
Cc: arnd@arndb.de
Cc: bp@alien8.de
Cc: catalin.marinas@arm.com
Cc: davem@davemloft.net
Cc: fenghua.yu@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: ink@jurassic.park.msu.ru
Cc: jhogan@kernel.org
Cc: linux@armlinux.org.uk
Cc: mattst88@gmail.com
Cc: mpe@ellerman.id.au
Cc: palmer@sifive.com
Cc: paul.burton@mips.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: rth@twiddle.net
Cc: tony.luck@intel.com
Link: https://lkml.kernel.org/r/20190522132250.26499-6-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0203fdc1
 1 file changed, 20 insertions(+), 21 deletions(-)
@@ -324,14 +324,14 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
  */
 
 typedef struct {
-	aligned_u64 counter;
+	s64 __aligned(8) counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(a) { (a) }
 
-static inline long long atomic64_read(const atomic64_t *v)
+static inline s64 atomic64_read(const atomic64_t *v)
 {
-	unsigned long long val;
+	s64 val;
 
 	__asm__ __volatile__(
 	"	ldd   %0, [%1]	\n"
@@ -341,7 +341,7 @@ static inline long long atomic64_read(const atomic64_t *v)
 	return val;
 }
 
-static inline void atomic64_set(atomic64_t *v, long long a)
+static inline void atomic64_set(atomic64_t *v, s64 a)
 {
 	/*
 	 * This could have been a simple assignment in "C" but would need
@@ -362,9 +362,9 @@ static inline void atomic64_set(atomic64_t *v, long long a)
 }
 
 #define ATOMIC64_OP(op, op1, op2)					\
-static inline void atomic64_##op(long long a, atomic64_t *v)		\
+static inline void atomic64_##op(s64 a, atomic64_t *v)			\
 {									\
-	unsigned long long val;						\
+	s64 val;							\
 									\
 	__asm__ __volatile__(						\
 	"1:				\n"				\
@@ -379,9 +379,9 @@ static inline void atomic64_##op(long long a, atomic64_t *v) \
 }									\
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)		        	\
-static inline long long atomic64_##op##_return(long long a, atomic64_t *v)	\
+static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
 {									\
-	unsigned long long val;						\
+	s64 val;							\
 									\
 	smp_mb();							\
 									\
@@ -402,9 +402,9 @@ static inline long long atomic64_##op##_return(long long a, atomic64_t *v) \
 }
 
 #define ATOMIC64_FETCH_OP(op, op1, op2)		        		\
-static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
+static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)		\
 {									\
-	unsigned long long val, orig;					\
+	s64 val, orig;							\
 									\
 	smp_mb();							\
 									\
@@ -444,10 +444,10 @@ ATOMIC64_OPS(xor, xor, xor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline long long
-atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
+static inline s64
+atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
 {
-	long long prev;
+	s64 prev;
 
 	smp_mb();
 
@@ -467,9 +467,9 @@ atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
 	return prev;
 }
 
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
 {
-	long long prev;
+	s64 prev;
 
 	smp_mb();
 
@@ -495,9 +495,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
  * the atomic variable, v, was not decremented.
  */
 
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-	long long val;
+	s64 val;
 
 	smp_mb();
 
@@ -528,10 +528,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
  * Atomically adds @a to @v, if it was not @u.
  * Returns the old value of @v
  */
-static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
-						  long long u)
+static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long long old, temp;
+	s64 old, temp;
 
 	smp_mb();