Commit 145d9785 authored by Babu Moger, committed by David S. Miller

arch/sparc: Enable queued spinlock support for SPARC

This patch makes the necessary changes to the SPARC architecture to
enable queued spinlock support. Some of the earlier discussions about
this feature:
https://lwn.net/Articles/561775/
https://lwn.net/Articles/590243/

Cleaned up spinlock_64.h: the arch_spin_xxx definitions are replaced
by the generic functions from <asm-generic/qspinlock.h>.
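
For reference (not part of this patch): with ARCH_USE_QUEUED_SPINLOCKS
selected, <asm-generic/qspinlock.h> supplies the arch_spin_* operations
by mapping them onto the generic queued-spinlock code, roughly as below
(the exact macro set varies by kernel version):

	/* asm-generic/qspinlock.h, approximate */
	#define arch_spin_is_locked(l)	queued_spin_is_locked(l)
	#define arch_spin_lock(l)	queued_spin_lock(l)
	#define arch_spin_trylock(l)	queued_spin_trylock(l)
	#define arch_spin_unlock(l)	queued_spin_unlock(l)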

Signed-off-by: Babu Moger <babu.moger@oracle.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
Reviewed-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 79d39e2b
arch/sparc/Kconfig +1 −0
@@ -84,6 +84,7 @@ config SPARC64
 	select HAVE_NMI
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select ARCH_USE_QUEUED_RWLOCKS
+	select ARCH_USE_QUEUED_SPINLOCKS
 
 config ARCH_DEFCONFIG
 	string
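
Selecting ARCH_USE_QUEUED_SPINLOCKS is what lets the generic code take
over: kernel/Kconfig.locks then turns it into CONFIG_QUEUED_SPINLOCKS on
SMP builds, approximately:

	config QUEUED_SPINLOCKS
		def_bool y if ARCH_USE_QUEUED_SPINLOCKS
		depends on SMP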
arch/sparc/include/asm/qspinlock.h +7 −0
+#ifndef _ASM_SPARC_QSPINLOCK_H
+#define _ASM_SPARC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_SPARC_QSPINLOCK_H */
arch/sparc/include/asm/spinlock_64.h +1 −83
@@ -11,89 +11,7 @@
 #include <asm/processor.h>
 #include <asm/barrier.h>
 #include <asm/qrwlock.h>
-
-/* To get debugging spinlocks which detect and catch
- * deadlock situations, set CONFIG_DEBUG_SPINLOCK
- * and rebuild your kernel.
- */
-
-/* Because we play games to save cycles in the non-contention case, we
- * need to be extra careful about branch targets into the "spinning"
- * code.  They live in their own section, but the newer V9 branches
- * have a shorter range than the traditional 32-bit sparc branch
- * variants.  The rule is that the branches that go into and out of
- * the spinner sections must be pre-V9 branches.
- */
-
-#define arch_spin_is_locked(lp)	((lp)->lock != 0)
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-"1:	ldstub		[%1], %0\n"
-"	brnz,pn		%0, 2f\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	ldub		[%1], %0\n"
-"	brnz,pt		%0, 2b\n"
-"	 nop\n"
-"	ba,a,pt		%%xcc, 1b\n"
-"	.previous"
-	: "=&r" (tmp)
-	: "r" (lock)
-	: "memory");
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned long result;
-
-	__asm__ __volatile__(
-"	ldstub		[%1], %0\n"
-	: "=r" (result)
-	: "r" (lock)
-	: "memory");
-
-	return (result == 0UL);
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	__asm__ __volatile__(
-"	stb		%%g0, [%0]"
-	: /* No outputs */
-	: "r" (lock)
-	: "memory");
-}
-
-static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-	unsigned long tmp1, tmp2;
-
-	__asm__ __volatile__(
-"1:	ldstub		[%2], %0\n"
-"	brnz,pn		%0, 2f\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	rdpr		%%pil, %1\n"
-"	wrpr		%3, %%pil\n"
-"3:	ldub		[%2], %0\n"
-"	brnz,pt		%0, 3b\n"
-"	 nop\n"
-"	ba,pt		%%xcc, 1b\n"
-"	 wrpr		%1, %%pil\n"
-"	.previous"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r"(lock), "r"(flags)
-	: "memory");
-}
+#include <asm/qspinlock.h>
 
 #define arch_read_lock_flags(p, f) arch_read_lock(p)
 #define arch_write_lock_flags(p, f) arch_write_lock(p)
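
Aside (not part of the diff): the removed code is a classic test-and-set
lock, and the comment about branch targets reflects its design. The
uncontended fast path is a single ldstub; contended waiters sit in an
out-of-line section spinning on plain ldub loads and retry the atomic
ldstub only once the byte reads as free, so the cache line is not
ping-ponged by repeated atomic stores. A stand-alone C11 sketch of the
same shape (hypothetical, illustration only):

	#include <stdatomic.h>

	typedef struct { atomic_uchar lock; } tas_lock_t;

	static inline void tas_spin_lock(tas_lock_t *l)
	{
		/* ldstub equivalent: atomically set the byte, test old value */
		while (atomic_exchange_explicit(&l->lock, 1, memory_order_acquire))
			/* contended: spin on plain loads until it looks free */
			while (atomic_load_explicit(&l->lock, memory_order_relaxed))
				;
	}

	static inline void tas_spin_unlock(tas_lock_t *l)
	{
		/* stb %g0 equivalent: release store of zero */
		atomic_store_explicit(&l->lock, 0, memory_order_release);
	}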
arch/sparc/include/asm/spinlock_types.h +5 −0
 #ifndef __SPARC_SPINLOCK_TYPES_H
 #define __SPARC_SPINLOCK_TYPES_H
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm-generic/qspinlock_types.h>
+#else
+
 typedef struct {
 	volatile unsigned char lock;
 } arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 #ifdef CONFIG_QUEUED_RWLOCKS
 #include <asm-generic/qrwlock_types.h>
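
With CONFIG_QUEUED_SPINLOCKS=y, arch_spinlock_t becomes the generic
qspinlock: a single 32-bit word whose value encodes the locked byte, a
pending bit, and the tail of the MCS waiter queue. Approximately (from
asm-generic/qspinlock_types.h of this vintage; the exact layout is
version-dependent):

	typedef struct qspinlock {
		atomic_t	val;	/* locked/pending bits + waiter tail */
	} arch_spinlock_t;

	#define	__ARCH_SPIN_LOCK_UNLOCKED	{ ATOMIC_INIT(0) }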