Commit 64d816cb authored by Waiman Long, committed by Ingo Molnar

locking/qspinlock: Use _acquire/_release() versions of cmpxchg() & xchg()



This patch replaces the cmpxchg() and xchg() calls in the native
qspinlock code with the more relaxed _acquire or _release versions of
those calls to enable other architectures to adopt queued spinlocks
with less memory barrier performance overhead.
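
For readers new to the relaxed atomic variants, the sketch below shows the ordering distinction in user-space C11 terms. It is not the kernel's atomic_*() API, and the tiny_lock type and helper names are invented for illustration: taking a lock only needs ACQUIRE ordering and releasing it only needs RELEASE ordering, whereas a plain cmpxchg()/xchg() implies a full barrier on both sides, which is more than the lock/unlock paths require on weakly ordered architectures.

/* Minimal C11 sketch, not kernel code: tiny_lock, tiny_trylock() and
 * tiny_unlock() are illustrative names only. */
#include <stdatomic.h>
#include <stdbool.h>

struct tiny_lock {
	atomic_uint val;		/* 0 = unlocked, 1 = locked */
};

static bool tiny_trylock(struct tiny_lock *l)
{
	unsigned int expected = 0;

	/* ACQUIRE on success: the critical section cannot be hoisted
	 * above the point where the lock is observed to be taken. */
	return atomic_compare_exchange_strong_explicit(&l->val, &expected, 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void tiny_unlock(struct tiny_lock *l)
{
	/* RELEASE: writes from the critical section are visible before
	 * the lock word is seen as free again. */
	atomic_store_explicit(&l->val, 0, memory_order_release);
}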

Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Douglas Hatch <doug.hatch@hpe.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1447114167-47185-2-git-send-email-Waiman.Long@hpe.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 978e5a36
include/asm-generic/qspinlock.h  +5 −4
@@ -12,8 +12,9 @@
  * GNU General Public License for more details.
  *
  * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
  *
- * Authors: Waiman Long <waiman.long@hp.com>
+ * Authors: Waiman Long <waiman.long@hpe.com>
  */
 #ifndef __ASM_GENERIC_QSPINLOCK_H
 #define __ASM_GENERIC_QSPINLOCK_H
@@ -62,7 +63,7 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
 	if (!atomic_read(&lock->val) &&
-	   (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
 		return 1;
 	return 0;
 }
@@ -77,7 +78,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
 	u32 val;
 
-	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
 	if (likely(val == 0))
 		return;
 	queued_spin_lock_slowpath(lock, val);
@@ -93,7 +94,7 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 	/*
 	 * smp_mb__before_atomic() in order to guarantee release semantics
 	 */
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_sub(_Q_LOCKED_VAL, &lock->val);
 }
 #endif
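
Note that the unlock path above keeps the fence-plus-plain-atomic pattern instead of switching to a _release() primitive; the explicit barrier is what gives the atomic_sub() its release semantics. A rough user-space C11 analogue of that pattern (sketch_unlock, lockval and LOCKED_VAL are illustrative stand-ins, not kernel identifiers):

#include <stdatomic.h>

#define LOCKED_VAL 1U			/* stands in for _Q_LOCKED_VAL */

static void sketch_unlock(atomic_uint *lockval)
{
	/* counterpart of smp_mb__before_atomic(): a release fence makes
	 * the following relaxed RMW behave as a release operation, so
	 * critical-section stores cannot sink past the unlock. */
	atomic_thread_fence(memory_order_release);

	/* counterpart of atomic_sub(_Q_LOCKED_VAL, &lock->val) */
	atomic_fetch_sub_explicit(lockval, LOCKED_VAL, memory_order_relaxed);
}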
kernel/locking/qspinlock.c  +24 −5
@@ -14,8 +14,9 @@
  * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
  * (C) Copyright 2013-2014 Red Hat, Inc.
  * (C) Copyright 2015 Intel Corp.
+ * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
  *
- * Authors: Waiman Long <waiman.long@hp.com>
+ * Authors: Waiman Long <waiman.long@hpe.com>
  *          Peter Zijlstra <peterz@infradead.org>
  */
 
@@ -176,7 +177,12 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 {
 	struct __qspinlock *l = (void *)lock;
 
-	return (u32)xchg(&l->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
+	/*
+	 * Use release semantics to make sure that the MCS node is properly
+	 * initialized before changing the tail code.
+	 */
+	return (u32)xchg_release(&l->tail,
+				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
 }
 
 #else /* _Q_PENDING_BITS == 8 */
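
Both xchg_tail() variants (the xchg_release() above and the cmpxchg_release() loop in the next hunk) rely on the same publish pattern: the MCS node must be fully initialized before it becomes reachable through the tail. A user-space C11 sketch of that pattern, with invented names (mcs_node_sketch, publish_tail) and a plain pointer in place of the kernel's encoded tail value:

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node_sketch {
	struct mcs_node_sketch *next;
	atomic_int locked;
};

static struct mcs_node_sketch *
publish_tail(_Atomic(struct mcs_node_sketch *) *tail,
	     struct mcs_node_sketch *node)
{
	/* node initialization must be complete... */
	node->next = NULL;
	atomic_store_explicit(&node->locked, 0, memory_order_relaxed);

	/* ...before the node is published; RELEASE pairs with the
	 * acquire-side read done by the next contender, so it never
	 * observes an uninitialized node. */
	return atomic_exchange_explicit(tail, node, memory_order_release);
}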
@@ -208,7 +214,11 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 
 	for (;;) {
 		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
-		old = atomic_cmpxchg(&lock->val, val, new);
+		/*
+		 * Use release semantics to make sure that the MCS node is
+		 * properly initialized before changing the tail code.
+		 */
+		old = atomic_cmpxchg_release(&lock->val, val, new);
 		if (old == val)
 			break;
 
@@ -319,7 +329,11 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 		if (val == new)
 			new |= _Q_PENDING_VAL;
 
-		old = atomic_cmpxchg(&lock->val, val, new);
+		/*
+		 * Acquire semantic is required here as the function may
+		 * return immediately if the lock was free.
+		 */
+		old = atomic_cmpxchg_acquire(&lock->val, val, new);
 		if (old == val)
 			break;
 
@@ -426,7 +440,12 @@ queue:
 			set_locked(lock);
 			break;
 		}
-		old = atomic_cmpxchg(&lock->val, val, _Q_LOCKED_VAL);
+		/*
+		 * The smp_load_acquire() call above has provided the necessary
+		 * acquire semantics required for locking. At most two
+		 * iterations of this loop may be ran.
+		 */
+		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
 		if (old == val)
 			goto release;	/* No contention */
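
The final hunk is the inverse case: the acquire ordering needed for lock acquisition was already provided by an earlier smp_load_acquire(), so the cmpxchg that actually claims the lock can be fully relaxed. A C11 sketch of that reasoning, with illustrative names (claim_after_acquire_load, lockval) rather than the real kernel identifiers:

#include <stdatomic.h>
#include <stdbool.h>

static bool claim_after_acquire_load(atomic_uint *lockval, unsigned int locked)
{
	/* counterpart of the earlier smp_load_acquire(): this is where
	 * the acquire ordering for the critical section comes from */
	unsigned int val = atomic_load_explicit(lockval, memory_order_acquire);

	/* counterpart of atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL):
	 * no extra ordering is needed, the load above already supplied it */
	return atomic_compare_exchange_strong_explicit(lockval, &val, locked,
						       memory_order_relaxed,
						       memory_order_relaxed);
}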