Commit 4d3199e4 authored by Davidlohr Bueso, committed by Ingo Molnar

locking: Remove ACCESS_ONCE() usage

With the new standardized functions, we can replace all
ACCESS_ONCE() calls across the relevant locking code; while
at it, this also covers lockref and seqlock.

ACCESS_ONCE() does not work reliably on non-scalar types.
For example, gcc 4.6 and 4.7 might remove the volatile
qualifier from such accesses during the SRA (scalar
replacement of aggregates) step:

  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145

Update the calls regardless of whether the type is scalar;
this is cleaner than having three alternatives.
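
For reference, a condensed sketch of the primitives involved, simplified
from the v3.19-era include/linux/compiler.h (illustrative, not the exact
kernel source). The old macro is a bare volatile cast, which SRA can
effectively strip when x is an aggregate; READ_ONCE() instead funnels
the access through a size-switched helper doing volatile scalar loads,
so it behaves correctly for non-scalar types too (WRITE_ONCE() is
symmetric via __write_once_size()):

	/* Old primitive: a plain volatile cast; reliable only for scalars. */
	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

	/* Volatile loads of matching width; barrier()-guarded copy as the
	 * fallback for odd-sized (e.g. aggregate) types. */
	static __always_inline
	void __read_once_size(const volatile void *p, void *res, int size)
	{
		switch (size) {
		case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
		case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
		case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
		case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
		default:
			barrier();
			__builtin_memcpy((void *)res, (const void *)p, size);
			barrier();
		}
	}

	/* Read through the helper and return the value as typeof(x). */
	#define READ_ONCE(x)						\
	({								\
		union { typeof(x) __val; char __c[1]; } __u;		\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
		__u.__val;						\
	})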

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1424662301.6539.18.camel@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2ae79026
include/linux/seqlock.h  +3 −3
@@ -108,7 +108,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
 	unsigned ret;
 
 repeat:
-	ret = ACCESS_ONCE(s->sequence);
+	ret = READ_ONCE(s->sequence);
 	if (unlikely(ret & 1)) {
 		cpu_relax();
 		goto repeat;
@@ -127,7 +127,7 @@ repeat:
  */
 static inline unsigned raw_read_seqcount(const seqcount_t *s)
 {
-	unsigned ret = ACCESS_ONCE(s->sequence);
+	unsigned ret = READ_ONCE(s->sequence);
 	smp_rmb();
 	return ret;
 }
@@ -179,7 +179,7 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
  */
 static inline unsigned raw_seqcount_begin(const seqcount_t *s)
 {
-	unsigned ret = ACCESS_ONCE(s->sequence);
+	unsigned ret = READ_ONCE(s->sequence);
 	smp_rmb();
 	return ret & ~1;
 }
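
For context (not part of this patch), these helpers sit underneath the
usual seqcount read-side retry loop; a minimal sketch of a caller,
assuming a seqcount_t s protecting some data:

	unsigned seq;

	do {
		seq = read_seqcount_begin(&s);
		/* speculatively read the data protected by s */
	} while (read_seqcount_retry(&s, seq));
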
kernel/locking/mcs_spinlock.h  +3 −3
@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 		 */
 		return;
 	}
-	ACCESS_ONCE(prev->next) = node;
+	WRITE_ONCE(prev->next, node);
 
 	/* Wait until the lock holder passes the lock down. */
 	arch_mcs_spin_lock_contended(&node->locked);
@@ -91,7 +91,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 static inline
 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 {
-	struct mcs_spinlock *next = ACCESS_ONCE(node->next);
+	struct mcs_spinlock *next = READ_ONCE(node->next);
 
 	if (likely(!next)) {
 		/*
@@ -100,7 +100,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 		if (likely(cmpxchg(lock, node, NULL) == node))
 			return;
 		/* Wait until the next pointer is set */
-		while (!(next = ACCESS_ONCE(node->next)))
+		while (!(next = READ_ONCE(node->next)))
 			cpu_relax_lowlatency();
 	}
 
kernel/locking/mutex.c  +4 −4
@@ -266,7 +266,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 		return 0;
 
 	rcu_read_lock();
-	owner = ACCESS_ONCE(lock->owner);
+	owner = READ_ONCE(lock->owner);
 	if (owner)
 		retval = owner->on_cpu;
 	rcu_read_unlock();
@@ -340,7 +340,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 			 * As such, when deadlock detection needs to be
 			 * performed the optimistic spinning cannot be done.
 			 */
-			if (ACCESS_ONCE(ww->ctx))
+			if (READ_ONCE(ww->ctx))
 				break;
 		}
 
@@ -348,7 +348,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
-		owner = ACCESS_ONCE(lock->owner);
+		owner = READ_ONCE(lock->owner);
 		if (owner && !mutex_spin_on_owner(lock, owner))
 			break;
 
@@ -487,7 +487,7 @@ static inline int __sched
 __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
+	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
 
 	if (!hold_ctx)
 		return 0;
kernel/locking/osq_lock.c  +7 −7
@@ -98,7 +98,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 
 	prev = decode_cpu(old);
 	node->prev = prev;
-	ACCESS_ONCE(prev->next) = node;
+	WRITE_ONCE(prev->next, node);
 
 	/*
 	 * Normally @prev is untouchable after the above store; because at that
@@ -109,7 +109,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	 * cmpxchg in an attempt to undo our queueing.
 	 */
 
-	while (!ACCESS_ONCE(node->locked)) {
+	while (!READ_ONCE(node->locked)) {
 		/*
 		 * If we need to reschedule bail... so we can block.
 		 */
@@ -148,7 +148,7 @@ unqueue:
 		 * Or we race against a concurrent unqueue()'s step-B, in which
 		 * case its step-C will write us a new @node->prev pointer.
 		 */
-		prev = ACCESS_ONCE(node->prev);
+		prev = READ_ONCE(node->prev);
 	}
 
 	/*
@@ -170,8 +170,8 @@ unqueue:
 	 * it will wait in Step-A.
 	 */
 
-	ACCESS_ONCE(next->prev) = prev;
-	ACCESS_ONCE(prev->next) = next;
+	WRITE_ONCE(next->prev, prev);
+	WRITE_ONCE(prev->next, next);
 
 	return false;
 }
@@ -193,11 +193,11 @@ void osq_unlock(struct optimistic_spin_queue *lock)
 	node = this_cpu_ptr(&osq_node);
 	next = xchg(&node->next, NULL);
 	if (next) {
-		ACCESS_ONCE(next->locked) = 1;
+		WRITE_ONCE(next->locked, 1);
 		return;
 	}
 
 	next = osq_wait_next(lock, node, NULL);
 	if (next)
-		ACCESS_ONCE(next->locked) = 1;
+		WRITE_ONCE(next->locked, 1);
 }
kernel/locking/rwsem-xadd.c  +5 −5
@@ -279,7 +279,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
  */
 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 {
-	long old, count = ACCESS_ONCE(sem->count);
+	long old, count = READ_ONCE(sem->count);
 
 	while (true) {
 		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
@@ -304,9 +304,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 		return false;
 
 	rcu_read_lock();
-	owner = ACCESS_ONCE(sem->owner);
+	owner = READ_ONCE(sem->owner);
 	if (!owner) {
-		long count = ACCESS_ONCE(sem->count);
+		long count = READ_ONCE(sem->count);
 		/*
 		 * If sem->owner is not set, yet we have just recently entered the
 		 * slowpath with the lock being active, then there is a possibility
@@ -385,7 +385,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		goto done;
 
 	while (true) {
-		owner = ACCESS_ONCE(sem->owner);
+		owner = READ_ONCE(sem->owner);
 		if (owner && !rwsem_spin_on_owner(sem, owner))
 			break;
 
@@ -459,7 +459,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 
 	/* we're now waiting on the lock, but no longer actively locking */
 	if (waiting) {
-		count = ACCESS_ONCE(sem->count);
+		count = READ_ONCE(sem->count);
 
 		/*
 		 * If there were already threads queued before us and there are