Commit 15e3f6d7 authored by Peter Zijlstra, committed by Ingo Molnar

locking,arch,parisc: Fold atomic_ops

OK, no LoC saved in this case because sub was defined in terms of add.
Still, do it, because this also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Helge Deller <deller@gmx.de>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: linux-parisc@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135852.659342353@infradead.org

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e69a0ef7
arch/parisc/include/asm/atomic.h: +69 −44
@@ -55,23 +55,6 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
  * are atomic, so a reader never sees inconsistent values.
  */
 
-/* It's possible to reduce all atomic operations to either
- * __atomic_add_return, atomic_set and atomic_read (the latter
- * is there only for consistency).
- */
-
-static __inline__ int __atomic_add_return(int i, atomic_t *v)
-{
-	int ret;
-	unsigned long flags;
-	_atomic_spin_lock_irqsave(v, flags);
-
-	ret = (v->counter += i);
-
-	_atomic_spin_unlock_irqrestore(v, flags);
-	return ret;
-}
-
 static __inline__ void atomic_set(atomic_t *v, int i)
 {
 	unsigned long flags;
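
For context: PA-RISC has no native atomic read-modify-write instructions, so
these helpers serialize through a small hash of spinlocks keyed by the
variable's address. A sketch of the locking helpers, roughly as defined
earlier in this header (details may differ slightly):

/* Sketch only: the lock is chosen by hashing the atomic's address into
 * __atomic_hash[], so unrelated atomics rarely contend on the same lock.
 */
#define _atomic_spin_lock_irqsave(l, f) do {		\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	local_irq_save(f);				\
	arch_spin_lock(s);				\
} while (0)

#define _atomic_spin_unlock_irqrestore(l, f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while (0)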
@@ -115,16 +98,43 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+#define ATOMIC_OP(op, c_op)						\
+static __inline__ void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long flags;						\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	v->counter c_op i;						\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+}									\
+
+#define ATOMIC_OP_RETURN(op, c_op)					\
+static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long flags;						\
+	int ret;							\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	ret = (v->counter c_op i);					\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+									\
+	return ret;							\
+}
+
+#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
-#define atomic_add(i,v)	((void)(__atomic_add_return(        (i),(v))))
-#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int) (i)),(v))))
-#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
-#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))
+#define atomic_inc(v)	(atomic_add(   1,(v)))
+#define atomic_dec(v)	(atomic_add(  -1,(v)))
 
-#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
-#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
-#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
-#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))
+#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
+#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
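
To make the fold concrete: after preprocessing, ATOMIC_OPS(add, +=) generates
exactly the two functions below, equivalent to the open-coded versions being
replaced (a sketch of the expansion, not part of the patch):

static __inline__ void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	v->counter += i;
	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int ret;

	_atomic_spin_lock_irqsave(v, flags);
	ret = (v->counter += i);
	_atomic_spin_unlock_irqrestore(v, flags);

	return ret;
}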
@@ -148,18 +158,37 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)

 #define ATOMIC64_INIT(i) { (i) }
 
-static __inline__ s64
-__atomic64_add_return(s64 i, atomic64_t *v)
-{
-	s64 ret;
-	unsigned long flags;
-	_atomic_spin_lock_irqsave(v, flags);
+#define ATOMIC64_OP(op, c_op)						\
+static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
+{									\
+	unsigned long flags;						\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	v->counter c_op i;						\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+}									\
+
+#define ATOMIC64_OP_RETURN(op, c_op)					\
+static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
+{									\
+	unsigned long flags;						\
+	s64 ret;							\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	ret = (v->counter c_op i);					\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+									\
+	return ret;							\
+}
 
-	ret = (v->counter += i);
+#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)
 
-	_atomic_spin_unlock_irqrestore(v, flags);
-	return ret;
-}
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 static __inline__ void
 atomic64_set(atomic64_t *v, s64 i)
@@ -178,15 +207,11 @@ atomic64_read(const atomic64_t *v)
 	return (*(volatile long *)&(v)->counter);
 }
 
-#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
-#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
-#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
-#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))
+#define atomic64_inc(v)		(atomic64_add(   1,(v)))
+#define atomic64_dec(v)		(atomic64_add(  -1,(v)))
 
-#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
-#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
-#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
-#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))
+#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
+#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))
 
 #define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
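
The payoff the changelog points at: once the ops are folded, adding a new
operation becomes a one-line instantiation next to the existing ATOMIC_OPS() /
ATOMIC64_OPS() lines (before the #undefs). A hypothetical example, not part of
this patch:

ATOMIC_OPS(and, &=)	/* would generate atomic_and() and atomic_and_return() */
ATOMIC64_OPS(and, &=)	/* would generate atomic64_and() and atomic64_and_return() */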