Commit d6462858 authored by Will Deacon

alpha: Override READ_ONCE() with barriered implementation

Rather than relying on the core code to use smp_read_barrier_depends()
as part of the READ_ONCE() definition, instead override __READ_ONCE()
in the Alpha code so that it generates the required mb() and then
implement smp_load_acquire() using the new macro to avoid redundant
back-to-back barriers from the generic implementation.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
parent b78b331a
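
For context, a rough before/after sketch of the expansions on Alpha. This
is a paraphrase for illustration (OLD_READ_ONCE/OLD_SMP_LOAD_ACQUIRE are
made-up names and the generic definitions are simplified), not the exact
kernel macros:

/*
 * Old scheme: the core READ_ONCE() appended smp_read_barrier_depends(),
 * which expands to mb() on Alpha.
 */
#define OLD_READ_ONCE(x)						\
({									\
	typeof(x) ___v = *(volatile typeof(x) *)&(x);			\
	mb();			/* smp_read_barrier_depends() */	\
	___v;								\
})

/*
 * The generic smp_load_acquire() then issued smp_mb() on top of that,
 * producing two back-to-back mb instructions for every acquire load.
 */
#define OLD_SMP_LOAD_ACQUIRE(p)						\
({									\
	typeof(*(p)) ___v = OLD_READ_ONCE(*(p));			\
	mb();			/* generic smp_mb(); redundant here */	\
	___v;								\
})

/*
 * New scheme (this patch): __READ_ONCE() carries the mb() itself, and the
 * Alpha __smp_load_acquire() below reuses it, so a single mb() remains.
 */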
arch/alpha/include/asm/barrier.h +5 −54
@@ -2,64 +2,15 @@
 #ifndef __BARRIER_H
 #define __BARRIER_H
 
-#include <asm/compiler.h>
-
 #define mb()	__asm__ __volatile__("mb": : :"memory")
 #define rmb()	__asm__ __volatile__("mb": : :"memory")
 #define wmb()	__asm__ __volatile__("wmb": : :"memory")
 
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- */
-#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
+#define __smp_load_acquire(p)						\
+({									\
+	compiletime_assert_atomic_type(*p);				\
+	__READ_ONCE(*p);						\
+})
 
 #ifdef CONFIG_SMP
 #define __ASM_SMP_MB	"\tmb\n"
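
The effect on an acquire load can be seen with the usual message-passing
pattern; a minimal sketch, where 'ready', 'data' and consume() are
hypothetical names rather than anything from this patch:

static int data;
static int ready;

int consume(void)
{
	/*
	 * On Alpha this now expands to __READ_ONCE(ready), i.e. one load
	 * followed by one mb(), which is enough to order the read of
	 * 'data' after the flag load; no second barrier is emitted.
	 */
	if (smp_load_acquire(&ready))
		return data;
	return -1;
}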
arch/alpha/include/asm/rwonce.h +35 −0

@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Google LLC.
+ */
+#ifndef __ASM_RWONCE_H
+#define __ASM_RWONCE_H
+
+#ifdef CONFIG_SMP
+
+#include <asm/barrier.h>
+
+/*
+ * Alpha is apparently daft enough to reorder address-dependent loads
+ * on some CPU implementations. Knock some common sense into it with
+ * a memory barrier in READ_ONCE().
+ *
+ * For the curious, more information about this unusual reordering is
+ * available in chapter 15 of the "perfbook":
+ *
+ *  https://kernel.org/pub/linux/kernel/people/paulmck/perfbook/perfbook.html
+ *
+ */
+#define __READ_ONCE(x)							\
+({									\
+	__unqual_scalar_typeof(x) __x =					\
+		(*(volatile typeof(__x) *)(&(x)));			\
+	mb();								\
+	(typeof(x))__x;							\
+})
+
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/rwonce.h>
+
+#endif /* __ASM_RWONCE_H */
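
The mb() matters because some Alpha implementations reorder even
address-dependent loads; a minimal sketch of the pattern it fixes, with
hypothetical names (publish(), consume_ptr() and the variables are not
from this patch):

static int a = 0, b = 1;
static int *p = &a;

/* CPU 0: initialise b, then publish its address. */
void publish(void)
{
	b = 2;
	smp_wmb();		/* order the store to b before the store to p */
	WRITE_ONCE(p, &b);
}

/* CPU 1: consume the pointer and dereference it. */
int consume_ptr(void)
{
	int *q = READ_ONCE(p);	/* now implies mb() on Alpha */
	return *q;		/* if q == &b, guaranteed to observe b == 2 */
}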