Commit 6ffddfb9 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar
Browse files

locking/rwsem: Add ACQUIRE comments



Since we just reviewed read_slowpath for ACQUIRE correctness, add a
few comments to retain our findings.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 952041a8
Loading
Loading
Loading
Loading
+13 −5
Original line number Diff line number Diff line
@@ -1004,6 +1004,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
	atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
	adjustment = 0;
	if (rwsem_optimistic_spin(sem, false)) {
		/* rwsem_optimistic_spin() implies ACQUIRE on success */
		/*
		 * Wake up other readers in the wait list if the front
		 * waiter is a reader.
@@ -1018,6 +1019,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
		}
		return sem;
	} else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
		/* rwsem_reader_phase_trylock() implies ACQUIRE on success */
		return sem;
	}

@@ -1071,10 +1073,10 @@ queue:
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
	for (;;) {
		set_current_state(state);
		if (!smp_load_acquire(&waiter.task)) {
			/* Orders against rwsem_mark_wake()'s smp_store_release() */
			/* Matches rwsem_mark_wake()'s smp_store_release(). */
			break;
		}
		if (signal_pending_state(state, current)) {
@@ -1082,6 +1084,7 @@ queue:
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
			break;
		}
		schedule();
@@ -1091,6 +1094,7 @@ queue:
	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock);
	return sem;

out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list)) {
@@ -1131,8 +1135,10 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
	    rwsem_optimistic_spin(sem, true))
	    rwsem_optimistic_spin(sem, true)) {
		/* rwsem_optimistic_spin() implies ACQUIRE on success */
		return sem;
	}

	/*
	 * Disable reader optimistic spinning for this rwsem after
@@ -1192,9 +1198,11 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
wait:
	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(sem, wstate))
	for (;;) {
		if (rwsem_try_write_lock(sem, wstate)) {
			/* rwsem_try_write_lock() implies ACQUIRE on success */
			break;
		}

		raw_spin_unlock_irq(&sem->wait_lock);