Commit d1be6a28 authored by Will Deacon

asm-generic/mmiowb: Add generic implementation of mmiowb() tracking

In preparation for removing all explicit mmiowb() calls from driver
code, implement a tracking system in asm-generic based loosely on the
PowerPC implementation. This allows architectures with a non-empty
mmiowb() definition to have the barrier automatically inserted in
spin_unlock() following a critical section containing an I/O write.
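
For example, with the tracking in place, a driver critical section such
as the following (hypothetical snippet, for illustration only):

	spin_lock(&dev->lock);
	writel(val, dev->base + CTRL_REG);
	mmiowb();
	spin_unlock(&dev->lock);

can drop the explicit mmiowb(): on architectures that select
ARCH_HAS_MMIOWB, spin_unlock() now issues the barrier itself whenever
an I/O write occurred inside the critical section.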

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 4614bbde
include/asm-generic/mmiowb.h  +63 −0
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_MMIOWB_H
#define __ASM_GENERIC_MMIOWB_H

/*
 * Generic implementation of mmiowb() tracking for spinlocks.
 *
 * If your architecture doesn't ensure that writes to an I/O peripheral
 * within two spinlocked sections on two different CPUs are seen by the
 * peripheral in the order corresponding to the lock handover, then you
 * need to follow these FIVE easy steps:
 *
 * 	1. Implement mmiowb() (and arch_mmiowb_state() if you're fancy)
 *	   in asm/mmiowb.h, then #include this file
 *	2. Ensure your I/O write accessors call mmiowb_set_pending()
 *	3. Select ARCH_HAS_MMIOWB
 *	4. Untangle the resulting mess of header files
 *	5. Complain to your architects
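 *
 *	   (A sketch of a hypothetical port covering steps 1-3 follows
 *	   this file.)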
 */
#ifdef CONFIG_MMIOWB

#include <linux/compiler.h>
#include <asm-generic/mmiowb_types.h>

#ifndef arch_mmiowb_state
#include <asm/percpu.h>
#include <asm/smp.h>

DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
#define __mmiowb_state()	this_cpu_ptr(&__mmiowb_state)
#else
#define __mmiowb_state()	arch_mmiowb_state()
#endif	/* arch_mmiowb_state */

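/*
 * Called from the arch's I/O write accessors: record that a barrier is
 * needed before the currently held lock(s) are released. A write issued
 * outside of any lock (nesting_count == 0) leaves the flag clear.
 */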
static inline void mmiowb_set_pending(void)
{
	struct mmiowb_state *ms = __mmiowb_state();
	ms->mmiowb_pending = ms->nesting_count;
}

static inline void mmiowb_spin_lock(void)
{
	struct mmiowb_state *ms = __mmiowb_state();
	ms->nesting_count++;
}

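/*
 * Issue mmiowb() on unlock if an I/O write was flagged since the last
 * barrier, then drop one level of lock nesting.
 */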
static inline void mmiowb_spin_unlock(void)
{
	struct mmiowb_state *ms = __mmiowb_state();

	if (unlikely(ms->mmiowb_pending)) {
		ms->mmiowb_pending = 0;
		mmiowb();
	}

	ms->nesting_count--;
}
#else
#define mmiowb_set_pending()		do { } while (0)
#define mmiowb_spin_lock()		do { } while (0)
#define mmiowb_spin_unlock()		do { } while (0)
#endif	/* CONFIG_MMIOWB */
#endif	/* __ASM_GENERIC_MMIOWB_H */
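
As a sketch, steps 1-3 above might look as follows for a hypothetical
architecture "foo" (illustrative only: the file path, the foo_writel()
accessor and the use of the RISC-V "fence o,w" instruction as a
stand-in barrier are assumptions, not part of this patch):

	/* arch/foo/include/asm/mmiowb.h */
	#ifndef __ASM_MMIOWB_H
	#define __ASM_MMIOWB_H

	/* Step 1: order prior MMIO writes before the lock is released. */
	#define mmiowb()	__asm__ __volatile__ ("fence o,w" ::: "memory")

	#include <asm-generic/mmiowb.h>

	#endif	/* __ASM_MMIOWB_H */

	/* Step 2: I/O write accessors flag that a barrier is now needed. */
	static inline void foo_writel(u32 val, volatile void __iomem *addr)
	{
		__raw_writel(val, addr);
		mmiowb_set_pending();
	}
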
include/asm-generic/mmiowb_types.h  +12 −0
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_MMIOWB_TYPES_H
#define __ASM_GENERIC_MMIOWB_TYPES_H

#include <linux/types.h>

struct mmiowb_state {
	u16	nesting_count;	/* number of spinlocks currently held */
	u16	mmiowb_pending;	/* nonzero if an I/O write needs a barrier */
};

#endif	/* __ASM_GENERIC_MMIOWB_TYPES_H */
kernel/Kconfig.locks  +7 −0
@@ -251,3 +251,10 @@ config ARCH_USE_QUEUED_RWLOCKS
config QUEUED_RWLOCKS
	def_bool y if ARCH_USE_QUEUED_RWLOCKS
	depends on SMP

config ARCH_HAS_MMIOWB
	bool

config MMIOWB
	def_bool y if ARCH_HAS_MMIOWB
	depends on SMP
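
An architecture opts in by selecting the symbol from its top-level
Kconfig entry, e.g. (hypothetical port, for illustration):

	config FOO_ARCH
		def_bool y
		select ARCH_HAS_MMIOWB
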
kernel/locking/spinlock.c  +7 −0
@@ -22,6 +22,13 @@
#include <linux/debug_locks.h>
#include <linux/export.h>

#ifdef CONFIG_MMIOWB
#ifndef arch_mmiowb_state
DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
#endif
#endif
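
/*
 * Sketch (not part of this patch): a companion change is expected to
 * wire the hooks above into the lock core, roughly as follows, so that
 * spin_unlock() issues mmiowb() whenever an I/O write is pending:
 *
 *	static inline void do_raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		arch_spin_lock(&lock->raw_lock);
 *		mmiowb_spin_lock();
 *	}
 *
 *	static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
 *	{
 *		mmiowb_spin_unlock();
 *		arch_spin_unlock(&lock->raw_lock);
 *	}
 */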

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are