Commit 346e91ee authored by Will Deacon
Browse files

mips/mmiowb: Add unconditional mmiowb() to arch_spin_unlock()



The mmiowb() macro is horribly difficult to use and drivers will continue
to work most of the time if they omit a call when it is required.

Rather than rely on driver authors getting this right, push mmiowb() into
arch_spin_unlock() for mips. If this is deemed to be a performance issue,
a subsequent optimisation could make use of ARCH_HAS_MMIOWB to elide
the barrier in cases where no I/O writes were performed inside the
critical section.

Acked-by: Paul Burton <paul.burton@mips.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent e9e8543f
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -12,7 +12,6 @@ generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += mmiowb.h
generic-y += msi.h
generic-y += parport.h
generic-y += percpu.h
+0 −3
Original line number Diff line number Diff line
@@ -102,9 +102,6 @@ static inline void set_io_port_base(unsigned long base)
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()

/* Some callers use this older API instead.  */
#define mmiowb() iobarrier_w()

/*
 *     virt_to_phys    -       map virtual addresses to physical
 *     @address: address to remap
+11 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_MMIOWB_H
#define _ASM_MMIOWB_H

/* asm/io.h provides iobarrier_w(), which must be visible before the
 * mmiowb() definition below. */
#include <asm/io.h>

/* On MIPS an mmiowb() is an I/O write barrier. */
#define mmiowb()	iobarrier_w()

/* The generic mmiowb machinery is included last so that it picks up the
 * arch-specific mmiowb() defined above. */
#include <asm-generic/mmiowb.h>

#endif	/* _ASM_MMIOWB_H */
+15 −0
Original line number Diff line number Diff line
@@ -11,6 +11,21 @@

#include <asm/processor.h>
#include <asm/qrwlock.h>

#include <asm-generic/qspinlock_types.h>

#define	queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * An unconditional mmiowb() is issued before the lock word is cleared,
 * so MMIO writes performed inside the critical section are ordered
 * before the lock release.  This removes the need for drivers to call
 * mmiowb() themselves.
 */
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	/* This could be optimised with ARCH_HAS_MMIOWB */
	mmiowb();
	/* Release store: drops the lock after the barrier above. */
	smp_store_release(&lock->locked, 0);
}

#include <asm/qspinlock.h>

#endif /* _ASM_SPINLOCK_H */