Commit d1331661 authored by Will Deacon, committed by Ingo Molnar

locking/qrwlock: Prevent slowpath writers getting held up by fastpath



When a prospective writer takes the qrwlock locking slowpath due to the
lock being held, it attempts to cmpxchg the wmode field from 0 to
_QW_WAITING so that concurrent lockers also take the slowpath and queue
on the spinlock accordingly, allowing the lockers to drain.

Unfortunately, this isn't fair, because a fastpath writer that comes in
after the lock is made available but before the _QW_WAITING flag is set
can effectively jump the queue. If there is a steady stream of prospective
writers, then the waiter will be held off indefinitely.
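
The window is easiest to see next to the code. A rough sketch of the pre-patch behaviour (paraphrasing the lines removed by this patch further down; not verbatim kernel source, and the "fastpath writer" is simply the unqueued cmpxchg of cnts from 0 to _QW_LOCKED that also appears near the top of queued_write_lock_slowpath() below):

	/* Queued writer, pre-patch: retry until it can claim the wmode byte. */
	for (;;) {
		if (!READ_ONCE(lock->wmode) &&
		    (cmpxchg_relaxed(&lock->wmode, 0, _QW_WAITING) == 0))
			break;
		cpu_relax();
	}

	/*
	 * Fastpath writer, meanwhile: if it observes cnts == 0 in the gap
	 * between the previous owner unlocking and the queued writer winning
	 * the cmpxchg above, it takes the lock directly and the queued writer
	 * simply goes around its loop again -- indefinitely, given a steady
	 * stream of such writers.
	 */
	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
		return;		/* lock acquired without queueing */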

This patch restores fairness by separating _QW_WAITING and _QW_LOCKED
into two distinct fields: _QW_LOCKED continues to occupy the bottom byte
of the lockword so that it can be cleared unconditionally when unlocking,
but _QW_WAITING now occupies what used to be the bottom bit of the reader
count. This then forces the slow-path for concurrent lockers.
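
Why this closes the window can be seen with a small user-space model (illustrative only, plain C11 atomics rather than kernel primitives; the QW_* constants mirror the new defines below): once the queued writer has added _QW_WAITING into the lock word, the fastpath writer's compare-and-swap against zero can no longer succeed.

	#include <stdatomic.h>
	#include <stdio.h>

	#define QW_LOCKED  0x0ffu	/* mirrors _QW_LOCKED  */
	#define QW_WAITING 0x100u	/* mirrors _QW_WAITING */

	int main(void)
	{
		atomic_uint cnts = 0;

		/* Queued writer advertises itself (atomic_add in the real code). */
		atomic_fetch_add(&cnts, QW_WAITING);

		/* Fastpath writer: cmpxchg(0 -> LOCKED) now fails, so it must queue. */
		unsigned int expected = 0;
		int jumped = atomic_compare_exchange_strong(&cnts, &expected, QW_LOCKED);
		printf("fastpath queue-jump %s\n", jumped ? "succeeded" : "failed");
		return 0;
	}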

Tested-by: Waiman Long <longman@redhat.com>
Tested-by: Jeremy Linton <jeremy.linton@arm.com>
Tested-by: Adam Wallis <awallis@codeaurora.org>
Tested-by: Jan Glauber <jglauber@cavium.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Jeremy.Linton@arm.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/1507810851-306-6-git-send-email-will.deacon@arm.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 087133ac
include/asm-generic/qrwlock.h  +5 −18
@@ -26,24 +26,11 @@
 
 /*
  * Writer states & reader shift and bias.
- *
- *       | +0 | +1 | +2 | +3 |
- *   ----+----+----+----+----+
- *    LE | 78 | 56 | 34 | 12 | 0x12345678
- *   ----+----+----+----+----+
- *       | wr |      rd      |
- *       +----+----+----+----+
- *
- *   ----+----+----+----+----+
- *    BE | 12 | 34 | 56 | 78 | 0x12345678
- *   ----+----+----+----+----+
- *       |      rd      | wr |
- *       +----+----+----+----+
  */
-#define	_QW_WAITING	1		/* A writer is waiting	   */
-#define	_QW_LOCKED	0xff		/* A writer holds the lock */
-#define	_QW_WMASK	0xff		/* Writer mask		   */
-#define	_QR_SHIFT	8		/* Reader count shift	   */
+#define	_QW_WAITING	0x100		/* A writer is waiting	   */
+#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
+#define	_QW_WMASK	0x1ff		/* Writer mask		   */
+#define	_QR_SHIFT	9		/* Reader count shift	   */
 #define _QR_BIAS	(1U << _QR_SHIFT)
 
 /*
@@ -134,7 +121,7 @@ static inline void queued_read_unlock(struct qrwlock *lock)
  */
 static inline void queued_write_unlock(struct qrwlock *lock)
 {
-	smp_store_release(&lock->wmode, 0);
+	smp_store_release(&lock->wlocked, 0);
 }
 
 /*
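
Taken together, the new constants imply the following lock-word layout (a sketch inferred from the defines above; it stands in for the little/big-endian table that this patch removes):

	bits 31..9 : reader count, in units of _QR_BIAS (1 << _QR_SHIFT)
	bit      8 : _QW_WAITING, a queued writer is waiting
	bits  7..0 : _QW_LOCKED, 0xff while a writer holds the lock

_QW_WMASK (0x1ff) spans both writer fields, so fastpath lockers that test the writer mask back off for a waiting writer as well as an active one, while the byte store in queued_write_unlock() above only ever touches bits 7..0.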
include/asm-generic/qrwlock_types.h  +4 −4
@@ -13,11 +13,11 @@ typedef struct qrwlock {
 		atomic_t cnts;
 		struct {
 #ifdef __LITTLE_ENDIAN
-			u8 wmode;	/* Writer mode   */
-			u8 rcnts[3];	/* Reader counts */
+			u8 wlocked;	/* Locked for write? */
+			u8 __lstate[3];
 #else
-			u8 rcnts[3];	/* Reader counts */
-			u8 wmode;	/* Writer mode   */
+			u8 __lstate[3];
+			u8 wlocked;	/* Locked for write? */
 #endif
 		};
 	};
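
On little-endian, wlocked aliases the low byte of cnts, which is what lets queued_write_unlock() drop the write lock with a single byte store while leaving _QW_WAITING and the reader count untouched. A minimal user-space illustration of that aliasing (assumes a little-endian host; not kernel code, names are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	union qrw_model {			/* models struct qrwlock's union */
		uint32_t cnts;
		struct {
			uint8_t wlocked;	/* bits  7..0 : _QW_LOCKED       */
			uint8_t lstate[3];	/* bits 31..8 : waiter + readers */
		};
	};

	int main(void)
	{
		union qrw_model l = { .cnts = 0x100u | 0xffu };	/* waiter + write lock held */

		l.wlocked = 0;	/* models smp_store_release(&lock->wlocked, 0) */
		printf("cnts after write unlock: %#x\n", (unsigned int)l.cnts);	/* 0x100: waiter survives */
		return 0;
	}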
kernel/locking/qrwlock.c  +5 −15
@@ -39,8 +39,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock)
 		 * so spin with ACQUIRE semantics until the lock is available
 		 * without waiting in the queue.
 		 */
-		atomic_cond_read_acquire(&lock->cnts, (VAL & _QW_WMASK)
-					 != _QW_LOCKED);
+		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
 		return;
 	}
 	atomic_sub(_QR_BIAS, &lock->cnts);
@@ -56,7 +55,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock)
 	 * that accesses can't leak upwards out of our subsequent critical
 	 * section in the case that the lock is currently held for write.
 	 */
-	atomic_cond_read_acquire(&lock->cnts, (VAL & _QW_WMASK) != _QW_LOCKED);
+	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
 
 	/*
 	 * Signal the next one in queue to become queue head
@@ -79,19 +78,10 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
 		goto unlock;
 
-	/*
-	 * Set the waiting flag to notify readers that a writer is pending,
-	 * or wait for a previous writer to go away.
-	 */
-	for (;;) {
-		if (!READ_ONCE(lock->wmode) &&
-		   (cmpxchg_relaxed(&lock->wmode, 0, _QW_WAITING) == 0))
-			break;
-
-		cpu_relax();
-	}
+	/* Set the waiting flag to notify readers that a writer is pending */
+	atomic_add(_QW_WAITING, &lock->cnts);
 
-	/* When no more readers, set the locked flag */
+	/* When no more readers or writers, set the locked flag */
 	do {
 		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
 	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
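
The wait condition in the loop above works because cnts == _QW_WAITING means exactly "no readers, no write holder, only our own waiting bit", at which point the cmpxchg turns that bit into _QW_LOCKED in one step. A small user-space model of that handover (illustrative only; the constants mirror the new defines):

	#include <stdatomic.h>
	#include <stdio.h>

	#define QW_LOCKED  0x0ffu	/* mirrors _QW_LOCKED  */
	#define QW_WAITING 0x100u	/* mirrors _QW_WAITING */
	#define QR_BIAS    (1u << 9)	/* mirrors _QR_BIAS    */

	int main(void)
	{
		atomic_uint cnts = QW_WAITING + QR_BIAS;	/* our waiting bit + one reader */
		unsigned int expected = QW_WAITING;

		/* A reader is still in: the handover cmpxchg must fail... */
		printf("with a reader: %s\n",
		       atomic_compare_exchange_strong(&cnts, &expected, QW_LOCKED)
		       ? "locked" : "still waiting");

		/* ...and succeeds once the reader count drains to zero. */
		atomic_fetch_sub(&cnts, QR_BIAS);
		expected = QW_WAITING;
		printf("no readers:    %s\n",
		       atomic_compare_exchange_strong(&cnts, &expected, QW_LOCKED)
		       ? "locked" : "still waiting");
		return 0;
	}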