Commit 00f3c5a3 authored by Waiman Long, committed by Ingo Molnar

locking/rwsem: Always release wait_lock before waking up tasks

With the use of wake_q, task wakeups can be done without holding the
wait_lock. There is one exception in the rwsem code, though: when the
writer in the slowpath detects that there are waiters ahead of it but
the rwsem is not held by a writer, it wakes them up while still holding
the wait_lock. This can lead to a long wait_lock hold time, especially
when a large number of readers are to be woken up.

Remedy this by releasing the wait_lock before waking up the tasks and
re-acquiring it afterward. The rwsem_try_write_lock() function is also
modified to read the rwsem count directly (under the wait_lock), so it
no longer operates on a stale count value.
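
For reference, the resulting wakeup step looks roughly like the sketch
below. The helper name and its factoring are illustrative only (the
slowpath open-codes this); the wake_q and wait_lock calls are the ones
used by the patch:

	#include <linux/rwsem.h>
	#include <linux/sched/wake_q.h>
	#include <linux/spinlock.h>

	/* Illustrative helper: wake queued waiters without holding wait_lock. */
	static void rwsem_wake_outside_wait_lock(struct rw_semaphore *sem,
						 struct wake_q_head *wake_q)
	{
		if (wake_q_empty(wake_q))
			return;

		/* Drop wait_lock so a large batch of wakeups runs lock-free. */
		raw_spin_unlock_irq(&sem->wait_lock);
		wake_up_q(wake_q);
		wake_q_init(wake_q);	/* wake_q is reused by the caller */

		/* Re-take wait_lock before retrying rwsem_try_write_lock(). */
		raw_spin_lock_irq(&sem->wait_lock);
	}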

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: huang ying <huang.ying.caritas@gmail.com>
Link: https://lkml.kernel.org/r/20190520205918.22251-9-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4f23dbc1
+5 −0
@@ -51,6 +51,11 @@ static inline void wake_q_init(struct wake_q_head *head)
 	head->lastp = &head->first;
 }
 
+static inline bool wake_q_empty(struct wake_q_head *head)
+{
+	return head->first == WAKE_Q_TAIL;
+}
+
 extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
 extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
 extern void wake_up_q(struct wake_q_head *head);
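
For context, a hedged sketch of how the wake_q API, including the new
wake_q_empty() helper, is typically used: queue wakeups under a lock,
then issue them after dropping it. The lock/waiter types and function
name below are made up for illustration; only the wake_q_* calls come
from this header:

	#include <linux/list.h>
	#include <linux/sched.h>
	#include <linux/sched/wake_q.h>
	#include <linux/spinlock.h>

	/* Hypothetical waiter bookkeeping, for illustration only. */
	struct my_waiter {
		struct task_struct	*task;
		struct list_head	list;
	};

	struct my_lock {
		spinlock_t		lock;
		struct list_head	waiters;
	};

	static void my_lock_wake_all(struct my_lock *ml)
	{
		DEFINE_WAKE_Q(wake_q);
		struct my_waiter *w;

		/* Queue the wakeups while holding the lock... */
		spin_lock(&ml->lock);
		list_for_each_entry(w, &ml->waiters, list)
			wake_q_add(&wake_q, w->task);
		spin_unlock(&ml->lock);

		/* ...and issue them only after the lock is dropped. */
		if (!wake_q_empty(&wake_q))
			wake_up_q(&wake_q);
	}
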
+15 −16
@@ -400,13 +400,14 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
  * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
  * bit is set or the lock is acquired with handoff bit cleared.
  */
-static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem,
+static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 					enum writer_wait_state wstate)
 {
-	long new;
+	long count, new;
 
 	lockdep_assert_held(&sem->wait_lock);
 
+	count = atomic_long_read(&sem->count);
 	do {
 		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
 
@@ -751,26 +752,25 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 					? RWSEM_WAKE_READERS
 					: RWSEM_WAKE_ANY, &wake_q);
 
-		/*
-		 * The wakeup is normally called _after_ the wait_lock
-		 * is released, but given that we are proactively waking
-		 * readers we can deal with the wake_q overhead as it is
-		 * similar to releasing and taking the wait_lock again
-		 * for attempting rwsem_try_write_lock().
-		 */
-		wake_up_q(&wake_q);
-
-		/* We need wake_q again below, reinitialize */
-		wake_q_init(&wake_q);
+		if (!wake_q_empty(&wake_q)) {
+			/*
+			 * We want to minimize wait_lock hold time especially
+			 * when a large number of readers are to be woken up.
+			 */
+			raw_spin_unlock_irq(&sem->wait_lock);
+			wake_up_q(&wake_q);
+			wake_q_init(&wake_q);	/* Used again, reinit */
+			raw_spin_lock_irq(&sem->wait_lock);
+		}
 	} else {
-		count = atomic_long_add_return(RWSEM_FLAG_WAITERS, &sem->count);
+		atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
 	}
 
 wait:
 	/* wait until we successfully acquire the lock */
 	set_current_state(state);
 	while (true) {
-		if (rwsem_try_write_lock(count, sem, wstate))
+		if (rwsem_try_write_lock(sem, wstate))
 			break;
 
 		raw_spin_unlock_irq(&sem->wait_lock);
@@ -811,7 +811,6 @@ wait:
 		}
 
 		raw_spin_lock_irq(&sem->wait_lock);
-		count = atomic_long_read(&sem->count);
 	}
 	__set_current_state(TASK_RUNNING);
 	list_del(&waiter.list);