Commit ef4cdc09 authored by Mark Rutland, committed by Ingo Molnar

locking/atomic, arm: Use s64 for atomic64

As a step towards making the atomic64 API use consistent types treewide,
let's have the arm atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long long, matching the generated
headers.

Otherwise, there should be no functional change as a result of this
patch.
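
For context on why this is not a functional change: on arm the kernel's s64 follows the int-ll64.h convention, where __s64 is long long, so the conversion changes the spelling of the type, not its representation. Below is a minimal standalone sketch of that equivalence; the local typedef of s64 is an assumption for illustration only (the kernel gets the real definition from <linux/types.h>):

	/* Standalone illustration only: in the kernel, s64 comes from
	 * <linux/types.h>; on arm it is long long via int-ll64.h. */
	#include <stdio.h>

	typedef long long s64;		/* stand-in matching the arm definition */

	typedef struct {
		s64 counter;		/* was: long long counter */
	} atomic64_t;

	int main(void)
	{
		/* Same width and layout, so swapping the type cannot
		 * change the generated code or the ABI. */
		_Static_assert(sizeof(s64) == sizeof(long long),
			       "s64 and long long match on arm");
		_Static_assert(sizeof(atomic64_t) == 8, "counter is 64 bits");
		printf("sizeof(atomic64_t) = %zu\n", sizeof(atomic64_t));
		return 0;
	}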

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aou@eecs.berkeley.edu
Cc: arnd@arndb.de
Cc: bp@alien8.de
Cc: catalin.marinas@arm.com
Cc: davem@davemloft.net
Cc: fenghua.yu@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: ink@jurassic.park.msu.ru
Cc: jhogan@kernel.org
Cc: mattst88@gmail.com
Cc: mpe@ellerman.id.au
Cc: palmer@sifive.com
Cc: paul.burton@mips.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: rth@twiddle.net
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-7-mark.rutland@arm.com

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 16fbad08
+24 −26
@@ -249,15 +249,15 @@ ATOMIC_OPS(xor, ^=, eor)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
-	long long counter;
+	s64 counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i) { (i) }
 
 #ifdef CONFIG_ARM_LPAE
-static inline long long atomic64_read(const atomic64_t *v)
+static inline s64 atomic64_read(const atomic64_t *v)
 {
-	long long result;
+	s64 result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrd	%0, %H0, [%1]"
@@ -268,7 +268,7 @@ static inline long long atomic64_read(const atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, s64 i)
 {
 	__asm__ __volatile__("@ atomic64_set\n"
 "	strd	%2, %H2, [%1]"
@@ -277,9 +277,9 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 	);
 }
 #else
-static inline long long atomic64_read(const atomic64_t *v)
+static inline s64 atomic64_read(const atomic64_t *v)
 {
-	long long result;
+	s64 result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
@@ -290,9 +290,9 @@ static inline long long atomic64_read(const atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, s64 i)
 {
-	long long tmp;
+	s64 tmp;
 
 	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic64_set\n"
@@ -307,9 +307,9 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 #endif
 
 #define ATOMIC64_OP(op, op1, op2)					\
-static inline void atomic64_##op(long long i, atomic64_t *v)		\
+static inline void atomic64_##op(s64 i, atomic64_t *v)			\
 {									\
-	long long result;						\
+	s64 result;							\
 	unsigned long tmp;						\
 									\
 	prefetchw(&v->counter);						\
@@ -326,10 +326,10 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
 }									\
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)				\
-static inline long long							\
-atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
+static inline s64							\
+atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)			\
 {									\
-	long long result;						\
+	s64 result;							\
 	unsigned long tmp;						\
 									\
 	prefetchw(&v->counter);						\
@@ -349,10 +349,10 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
 }
 
 #define ATOMIC64_FETCH_OP(op, op1, op2)					\
-static inline long long							\
-atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)		\
+static inline s64							\
+atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)			\
 {									\
-	long long result, val;						\
+	s64 result, val;						\
 	unsigned long tmp;						\
 									\
 	prefetchw(&v->counter);						\
@@ -406,10 +406,9 @@ ATOMIC64_OPS(xor, eor, eor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline long long
-atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
 {
-	long long oldval;
+	s64 oldval;
 	unsigned long res;
 
 	prefetchw(&ptr->counter);
@@ -430,9 +429,9 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
 }
 #define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed
 
-static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
 {
-	long long result;
+	s64 result;
 	unsigned long tmp;
 
 	prefetchw(&ptr->counter);
@@ -450,9 +449,9 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
 }
 #define atomic64_xchg_relaxed		atomic64_xchg_relaxed
 
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-	long long result;
+	s64 result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -478,10 +477,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
}
 #define atomic64_dec_if_positive atomic64_dec_if_positive
 
-static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
-						  long long u)
+static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long long oldval, newval;
+	s64 oldval, newval;
 	unsigned long tmp;
 
 	smp_mb();
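
The final hunk converts atomic64_fetch_add_unless(), whose contract is: atomically add a to v unless v currently equals u, and return the prior value either way. Below is a hedged, userspace-only sketch of just that semantic, using GCC/Clang __atomic builtins instead of the kernel's ldrexd/strexd loop; the function and type names are illustrative, not kernel API:

	/* Semantics-only sketch of atomic64_fetch_add_unless(); the kernel
	 * version is an exclusive-load/store loop with explicit barriers. */
	#include <stdint.h>
	#include <stdio.h>

	typedef int64_t s64;	/* stand-in for the kernel type */

	static s64 fetch_add_unless64(s64 *v, s64 a, s64 u)
	{
		s64 old = __atomic_load_n(v, __ATOMIC_RELAXED);

		/* Retry until v equals u (give up) or the CAS succeeds;
		 * on failure the builtin refreshes 'old' with *v. */
		while (old != u &&
		       !__atomic_compare_exchange_n(v, &old, old + a, 0,
						    __ATOMIC_SEQ_CST,
						    __ATOMIC_RELAXED))
			;
		return old;	/* old value, whether or not we added */
	}

	int main(void)
	{
		s64 x = 5;

		/* x == u: no add, returns 5, x stays 5 */
		printf("%lld -> %lld\n",
		       (long long)fetch_add_unless64(&x, 1, 5), (long long)x);
		/* x != u: adds, returns 5, x becomes 6 */
		printf("%lld -> %lld\n",
		       (long long)fetch_add_unless64(&x, 1, 0), (long long)x);
		return 0;
	}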