Commit 49ca6462 authored by Will Deacon

ia64/mmiowb: Add unconditional mmiowb() to arch_spin_unlock()



The mmiowb() macro is horribly difficult to use and drivers will continue
to work most of the time if they omit a call when it is required.

Rather than rely on driver authors getting this right, push mmiowb() into
arch_spin_unlock() for ia64. If this is deemed to be a performance issue,
a subsequent optimisation could make use of ARCH_HAS_MMIOWB to elide
the barrier in cases where no I/O writes were performed inside the
critical section.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 346e91ee
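
In practical terms, a driver that performs MMIO writes inside a spinlocked critical section no longer has to remember an explicit mmiowb() before releasing the lock on ia64. A minimal, hypothetical sketch of the driver-side picture (struct foo_dev, the FOO_* register offsets and foo_kick() are invented for illustration and are not part of this commit):

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical device, for illustration only. */
struct foo_dev {
	spinlock_t	lock;
	void __iomem	*base;
};

#define FOO_DATA	0x00	/* made-up register offsets */
#define FOO_DOORBELL	0x04

static void foo_kick(struct foo_dev *foo, u32 val)
{
	spin_lock(&foo->lock);
	writel(val, foo->base + FOO_DATA);
	writel(1, foo->base + FOO_DOORBELL);
	/*
	 * Previously, a correct ia64 driver needed an explicit mmiowb()
	 * here so the doorbell write could not reach the device after a
	 * write issued by the next lock holder on another CPU.  With
	 * mmiowb() performed in arch_spin_unlock(), the unlock alone is
	 * now sufficient.
	 */
	spin_unlock(&foo->lock);
}

The barrier is issued unconditionally by __ticket_spin_unlock(), as shown in the final hunk of the diff below.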
+0 −1
@@ -5,7 +5,6 @@ generic-y += irq_work.h
 generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
 generic-y += preempt.h
 generic-y += trace_clock.h
 generic-y += vtime.h
+0 −17
@@ -113,20 +113,6 @@ extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
  */
 #define __ia64_mf_a()	ia64_mfa()
 
-/**
- * ___ia64_mmiowb - I/O write barrier
- *
- * Ensure ordering of I/O space writes.  This will make sure that writes
- * following the barrier will arrive after all previous writes.  For most
- * ia64 platforms, this is a simple 'mf.a' instruction.
- *
- * See Documentation/driver-api/device-io.rst for more information.
- */
-static inline void ___ia64_mmiowb(void)
-{
-	ia64_mfa();
-}
-
 static inline void*
 __ia64_mk_io_addr (unsigned long port)
 {
@@ -161,7 +147,6 @@ __ia64_mk_io_addr (unsigned long port)
 #define __ia64_writew	___ia64_writew
 #define __ia64_writel	___ia64_writel
 #define __ia64_writeq	___ia64_writeq
-#define __ia64_mmiowb	___ia64_mmiowb
 
 /*
  * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
@@ -296,7 +281,6 @@ __outsl (unsigned long port, const void *src, unsigned long count)
 #define __outb		platform_outb
 #define __outw		platform_outw
 #define __outl		platform_outl
-#define __mmiowb	platform_mmiowb
 
 #define inb(p)		__inb(p)
 #define inw(p)		__inw(p)
@@ -310,7 +294,6 @@ __outsl (unsigned long port, const void *src, unsigned long count)
 #define outsb(p,s,c)	__outsb(p,s,c)
 #define outsw(p,s,c)	__outsw(p,s,c)
 #define outsl(p,s,c)	__outsl(p,s,c)
-#define mmiowb()	__mmiowb()
 
 /*
  * The address passed to these functions are ioremap()ped already.
+25 −0
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_IA64_MMIOWB_H
+#define _ASM_IA64_MMIOWB_H
+
+#include <asm/machvec.h>
+
+/**
+ * ___ia64_mmiowb - I/O write barrier
+ *
+ * Ensure ordering of I/O space writes.  This will make sure that writes
+ * following the barrier will arrive after all previous writes.  For most
+ * ia64 platforms, this is a simple 'mf.a' instruction.
+ */
+static inline void ___ia64_mmiowb(void)
+{
+	ia64_mfa();
+}
+
+#define __ia64_mmiowb	___ia64_mmiowb
+#define mmiowb()	platform_mmiowb()
+
+#include <asm-generic/mmiowb.h>
+
+#endif	/* _ASM_IA64_MMIOWB_H */
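
The comment in the new header notes that, on most ia64 platforms, the barrier is a single 'mf.a' (memory fence, acceptance) instruction, which waits for prior uncacheable I/O accesses to be accepted by the platform. As a rough approximation (not the exact kernel definition, which lives in the ia64 intrinsics headers), ia64_mfa() boils down to something like:

#define ia64_mfa()	asm volatile ("mf.a" ::: "memory")	/* approximate */

Since mmiowb() is defined here in terms of platform_mmiowb(), and ia64 does not select ARCH_HAS_MMIOWB in this commit, the trailing include of asm-generic/mmiowb.h should only pull in the generic no-op tracking hooks.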
+2 −0
@@ -73,6 +73,8 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
 
+	/* This could be optimised with ARCH_HAS_MMIOWB */
+	mmiowb();
 	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
 	WRITE_ONCE(*p, (tmp + 2) & ~1);
 }
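
The comment added above points at the follow-up optimisation mentioned in the commit message: if ia64 selected ARCH_HAS_MMIOWB, the I/O write accessors could record that a write happened and the unlock path could issue mf.a only when the critical section actually touched I/O space. A rough sketch of that kind of tracking, assuming per-CPU state along the lines of what asm-generic/mmiowb.h provides (the names below are illustrative, not ia64 code introduced by this commit):

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/mmiowb.h>

/* Illustrative per-CPU tracking state, not part of this commit. */
struct mmiowb_track {
	u16	nesting_count;	/* lock nesting depth on this CPU */
	u16	mmiowb_pending;	/* set by I/O write accessors while a lock is held */
};

static inline void mmiowb_unlock_sketch(struct mmiowb_track *ms)
{
	/* Pay for mf.a only if an MMIO write happened under the lock. */
	if (unlikely(ms->mmiowb_pending)) {
		ms->mmiowb_pending = 0;
		mmiowb();
	}
	ms->nesting_count--;
}

With that in place, the unconditional mmiowb() added to __ticket_spin_unlock() could be replaced by the tracking-based call, so uncontended locks that never touch I/O space would not pay for the fence.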