Commit d184cf1a authored by Mark Rutland, committed by Ingo Molnar

locking/atomic, mips: Use s64 for atomic64



As a step towards making the atomic64 API use consistent types treewide,
let's have the mips atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long or __s64, matching the generated
headers.
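
For context, a minimal sketch of the type situation this series is cleaning up (illustrative, not part of the patch): at this point the generic atomic64_t still wraps a long on 64-bit, while the generated headers declare the atomic64 API in terms of s64:

	/* include/linux/types.h, before the later patches in this series */
	#ifdef CONFIG_64BIT
	typedef struct {
		long counter;
	} atomic64_t;
	#endif

	/* whereas the generated headers declare, for example: */
	static inline void atomic64_add(s64 i, atomic64_t *v);

On 64-bit MIPS both long and s64 are 64 bits wide, so moving the implementation to s64 changes the declared types without changing behaviour.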

As atomic64_read() depends on the generic definition of atomic64_t, this
still returns long on 64-bit. This will be converted in a subsequent
patch.
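
Concretely, the mips atomic64_read() is defined in terms of the counter field, so its return type follows the generic atomic64_t definition rather than anything in this file; a sketch of that existing definition:

	/* arch/mips/include/asm/atomic.h */
	#define atomic64_read(v)	READ_ONCE((v)->counter)

Until the generic counter becomes s64, this therefore still evaluates to a long on 64-bit.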

Otherwise, there should be no functional change as a result of this
patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: James Hogan <jhogan@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aou@eecs.berkeley.edu
Cc: arnd@arndb.de
Cc: bp@alien8.de
Cc: catalin.marinas@arm.com
Cc: davem@davemloft.net
Cc: fenghua.yu@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: ink@jurassic.park.msu.ru
Cc: linux@armlinux.org.uk
Cc: mattst88@gmail.com
Cc: mpe@ellerman.id.au
Cc: palmer@sifive.com
Cc: paulus@samba.org
Cc: rth@twiddle.net
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-10-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent d84e28d2
+11 −11
@@ -254,10 +254,10 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 #define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
 
 #define ATOMIC64_OP(op, c_op, asm_op)					      \
-static __inline__ void atomic64_##op(long i, atomic64_t * v)		      \
+static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		      \
 {									      \
 	if (kernel_uses_llsc) {						      \
-		long temp;						      \
+		s64 temp;						      \
 									      \
 		loongson_llsc_mb();					      \
 		__asm__ __volatile__(					      \
@@ -280,12 +280,12 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 }
 
 #define ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)   \
 {									      \
-	long result;							      \
+	s64 result;							      \
 									      \
 	if (kernel_uses_llsc) {						      \
-		long temp;						      \
+		s64 temp;						      \
 									      \
 		loongson_llsc_mb();					      \
 		__asm__ __volatile__(					      \
@@ -314,12 +314,12 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
 }
 
 #define ATOMIC64_FETCH_OP(op, c_op, asm_op)				      \
-static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)  \
+static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)    \
 {									      \
-	long result;							      \
+	s64 result;							      \
 									      \
 	if (kernel_uses_llsc) {						      \
-		long temp;						      \
+		s64 temp;						      \
 									      \
 		loongson_llsc_mb();					      \
 		__asm__ __volatile__(					      \
@@ -386,14 +386,14 @@ ATOMIC64_OPS(xor, ^=, xor)
  * Atomically test @v and subtract @i if @v is greater or equal than @i.
  * The function returns the old value of @v minus @i.
  */
-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+static __inline__ s64 atomic64_sub_if_positive(s64 i, atomic64_t * v)
 {
-	long result;
+	s64 result;
 
 	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc) {
-		long temp;
+		s64 temp;
 
 		__asm__ __volatile__(
 		"	.set	push					\n"