Commit bb7cdd38 authored by Will Deacon

alpha: Replace smp_read_barrier_depends() usage with smp_[r]mb()



In preparation for removing smp_read_barrier_depends() altogether,
move the Alpha code over to using smp_rmb() and smp_mb() directly.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
parent 71c0b9a6
arch/alpha/include/asm/atomic.h +8 −8
@@ -16,10 +16,10 @@

/*
 * To ensure dependency ordering is preserved for the _relaxed and
- * _release atomics, an smp_read_barrier_depends() is unconditionally
- * inserted into the _relaxed variants, which are used to build the
- * barriered versions. Avoid redundant back-to-back fences in the
- * _acquire and _fence versions.
+ * _release atomics, an smp_mb() is unconditionally inserted into the
+ * _relaxed variants, which are used to build the barriered versions.
+ * Avoid redundant back-to-back fences in the _acquire and _fence
+ * versions.
 */
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()
@@ -70,7 +70,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
-	smp_read_barrier_depends();					\
+	smp_mb();							\
	return result;							\
}

@@ -88,7 +88,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
-	smp_read_barrier_depends();					\
+	smp_mb();							\
	return result;							\
}

@@ -123,7 +123,7 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
-	smp_read_barrier_depends();					\
+	smp_mb();							\
	return result;							\
}

@@ -141,7 +141,7 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
-	smp_read_barrier_depends();					\
+	smp_mb();							\
	return result;							\
}
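
For context, here is a minimal sketch (not part of this patch) of how the generic atomic layer builds a fully ordered operation from the _relaxed variant plus fences, following the include/linux/atomic.h fallback pattern; the exact generated wrapper may differ. On Alpha the _relaxed operation now ends in smp_mb(), which is why __atomic_post_full_fence() above is defined empty.

/* Sketch of the generic fallback pattern, not the verbatim generated code. */
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	int ret;

	__atomic_pre_full_fence();		/* leading fence, still emitted on Alpha */
	ret = atomic_add_return_relaxed(i, v);	/* on Alpha this now ends with smp_mb() */
	__atomic_post_full_fence();		/* empty on Alpha: avoids a redundant back-to-back fence */
	return ret;
}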

arch/alpha/include/asm/pgtable.h +5 −5
@@ -277,9 +277,9 @@ extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; retur
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }

/*
- * The smp_read_barrier_depends() in the following functions are required to
- * order the load of *dir (the pointer in the top level page table) with any
- * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir).
+ * The smp_rmb() in the following functions are required to order the load of
+ * *dir (the pointer in the top level page table) with any subsequent load of
+ * the returned pmd_t *ret (ret is data dependent on *dir).
 *
 * If this ordering is not enforced, the CPU might load an older value of
 * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
@@ -293,7 +293,7 @@ extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; retu
extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
{
	pmd_t *ret = (pmd_t *) pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
-	smp_read_barrier_depends(); /* see above */
+	smp_rmb(); /* see above */
	return ret;
}
#define pmd_offset pmd_offset
@@ -303,7 +303,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
-	smp_read_barrier_depends(); /* see above */
+	smp_rmb(); /* see above */
	return ret;
}
#define pte_offset_kernel pte_offset_kernel
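
As a hedged illustration of the ordering requirement described in the comment above (invented names, not part of the patch): the walker loads the upper-level entry (*dir) and then performs a load that depends on the pointer it obtained; on Alpha the address dependency alone does not order the two loads, hence the smp_rmb().

/* Illustrative only: "struct tbl" and walk_one_level() are not kernel APIs. */
struct tbl { unsigned long entry[512]; };

static unsigned long walk_one_level(struct tbl **slot, unsigned long idx)
{
	struct tbl *t = READ_ONCE(*slot);	/* load of *dir: the published pointer */

	smp_rmb();				/* Alpha: order against the dependent load below */
	return t->entry[idx];			/* without the barrier this can observe stale data */
}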
mm/memory.c +1 −1
@@ -437,7 +437,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
-	 * smp_read_barrier_depends() barriers in page table walking code.
+	 * smp_rmb() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
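
The smp_wmb() here pairs with the reader-side barriers in the Alpha accessors above. A hedged sketch of the writer side, reusing the invented names from the reader example (the real code is __pte_alloc() initializing the page table page and pmd_populate() publishing it):

/* Illustrative only: publish a freshly initialized lower-level table. */
static void publish_one_level(struct tbl **slot, struct tbl *new)
{
	new->entry[0] = 0;		/* initialize the new table first */
	smp_wmb();			/* order initialization before the pointer store */
	WRITE_ONCE(*slot, new);		/* publish: walkers may now reach "new" */
}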