Commit b012980d authored by Will Deacon
Browse files

riscv/mmiowb: Hook up mmiowb() implementation to asm-generic code



In a bid to kill off explicit mmiowb() usage in driver code, hook up
the asm-generic mmiowb() tracking code for riscv, so that an mmiowb()
is automatically issued from spin_unlock() if an I/O write was performed
in the critical section.

Reviewed-by: Palmer Dabbelt <palmer@sifive.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 420af155
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -48,6 +48,7 @@ config RISCV
	select RISCV_TIMER
	select GENERIC_IRQ_MULTI_HANDLER
	select ARCH_HAS_PTE_SPECIAL
	select ARCH_HAS_MMIOWB
	select HAVE_EBPF_JIT if 64BIT

config MMU
+0 −1
Original line number Diff line number Diff line
@@ -21,7 +21,6 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
generic-y += mmiowb.h
generic-y += mutex.h
generic-y += percpu.h
generic-y += preempt.h
+2 −13
Original line number Diff line number Diff line
@@ -20,6 +20,7 @@
#define _ASM_RISCV_IO_H

#include <linux/types.h>
#include <asm/mmiowb.h>

extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);

@@ -99,18 +100,6 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
}
#endif

/*
 * FIXME: I'm flip-flopping on whether or not we should keep this or enforce
 * the ordering with I/O on spinlocks like PowerPC does.  The worry is that
 * drivers won't get this correct, but I also don't want to introduce a fence
 * into the lock code that otherwise only uses AMOs (and is essentially defined
 * by the ISA to be correct).   For now I'm leaving this here: "o,w" is
 * sufficient to ensure that all writes to the device have completed before the
 * write to the spinlock is allowed to commit.  I surmised this from reading
 * "ACQUIRES VS I/O ACCESSES" in memory-barriers.txt.
 */
#define mmiowb()	__asm__ __volatile__ ("fence o,w" : : : "memory");

/*
 * Unordered I/O memory access primitives.  These are even more relaxed than
 * the relaxed versions, as they don't even order accesses between successive
@@ -165,7 +154,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#define __io_br()	do {} while (0)
#define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory");
#define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory");
#define __io_aw()	do {} while (0)
#define __io_aw()	mmiowb_set_pending()

#define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
#define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
+14 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_RISCV_MMIOWB_H
#define _ASM_RISCV_MMIOWB_H

/*
 * mmiowb(): order MMIO writes against a subsequent spin_unlock() so that
 * device writes performed inside a critical section are visible before the
 * lock release commits.
 *
 * A FENCE with predecessor set "o" (device output) and successor set "w"
 * (memory writes) is sufficient to ensure that all writes to the device
 * have completed before the write to the spinlock is allowed to commit.
 *
 * No trailing semicolon in the expansion: the caller supplies it, and an
 * embedded one would create an empty statement that breaks constructs like
 * "if (cond) mmiowb(); else ...".
 */
#define mmiowb()	__asm__ __volatile__ ("fence o,w" : : : "memory")

/* Pulls in the generic per-CPU mmiowb tracking; needs mmiowb() defined. */
#include <asm-generic/mmiowb.h>

#endif	/* _ASM_RISCV_MMIOWB_H */