Commit 2fed0c50 authored by Glauber de Oliveira Costa, committed by Ingo Molnar

x86: consolidate spinlock.h



The cli and sti instructions need to be replaced by paravirt hooks.
For the i386 architecture, this is already done. The code requirements
aren't much different from the x86_64 point of view, so this part is
consolidated into the common header.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 6abcd98f
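[Editor's note] The point of the consolidation below is that the lock code never emits a literal cli/sti: it splices these macros into its asm templates, so a CONFIG_PARAVIRT build can substitute patchable call sequences, together with any extra clobbers and inputs they need. A minimal sketch of the pattern, assuming the macro block of the new common header; spin_irq_window() is a made-up helper, not part of the patch:

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>	/* redefines all four macros */
#else
#define CLI_STRING	"cli"
#define STI_STRING	"sti"
#define CLI_STI_CLOBBERS	/* native cli/sti clobber nothing extra */
#define CLI_STI_INPUT_ARGS	/* ...and need no extra asm inputs */
#endif

/* Hypothetical helper showing how the macros splice into a single
 * asm statement, the same way __raw_spin_lock_flags does below:
 * briefly re-enable interrupts while waiting, then disable again. */
static inline void spin_irq_window(unsigned long flags)
{
	asm volatile(STI_STRING "\n\t"	/* open the irq window */
		     "rep;nop\n\t"	/* be polite while spinning */
		     CLI_STRING
		     : /* no outputs */
		     : "r" (flags) CLI_STI_INPUT_ARGS
		     : "memory" CLI_STI_CLOBBERS);
}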
include/asm-x86/spinlock.h  +14 −0
#ifndef _X86_SPINLOCK_H_
#define _X86_SPINLOCK_H_

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define CLI_STRING	"cli"
#define STI_STRING	"sti"
#define CLI_STI_CLOBBERS
#define CLI_STI_INPUT_ARGS
#endif /* CONFIG_PARAVIRT */

#ifdef CONFIG_X86_32
# include "spinlock_32.h"
#else
# include "spinlock_64.h"
#endif

#endif
include/asm-x86/spinlock_32.h  +26 −45
@@ -5,16 +5,6 @@
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>

-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define CLI_STRING	"cli"
-#define STI_STRING	"sti"
-#define CLI_STI_CLOBBERS
-#define CLI_STI_INPUT_ARGS
-#endif /* CONFIG_PARAVIRT */

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -27,14 +17,15 @@
 * (the type definitions are in asm/spinlock_types.h)
 */

-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
-	return *(volatile signed char *)(&(x)->slock) <= 0;
+	return *(volatile signed char *)(&(lock)->slock) <= 0;
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	asm volatile("\n1:\t"
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decb %0\n\t"
		"jns 3f\n"
		"2:\t"
@@ -55,7 +46,8 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
 */
#ifndef CONFIG_PROVE_LOCKING
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+					 unsigned long flags)
{
	asm volatile(
		"\n1:\t"
@@ -86,11 +78,13 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
-	char oldval;
+	signed char oldval;
+
	asm volatile(
		"xchgb %b0,%1"
		:"=q" (oldval), "+m" (lock->slock)
		:"0" (0) : "memory");

	return oldval > 0;
}

@@ -112,7 +106,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
-	char oldval = 1;
+	unsigned char oldval = 1;

	asm volatile("xchgb %b0, %1"
		     : "=q" (oldval), "+m" (lock->slock)
@@ -139,31 +133,16 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores.  See
- * semaphore.h for details.  -ben
- *
- * the helpers are in arch/i386/kernel/semaphore.c
 */

-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
-	return (int)(x)->lock > 0;
+	return (int)(lock)->lock > 0;
}

-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
-	return (x)->lock == RW_LOCK_BIAS;
+	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
@@ -187,6 +166,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
+
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
@@ -197,6 +177,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
+
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
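[Editor's note] The rwlock helpers above implement the biased-counter scheme the comment describes: the counter starts at RW_LOCK_BIAS, each reader subtracts 1, a writer subtracts the whole bias, and a negative result means "contended, back out". A user-space model of the same arithmetic, as a sketch: the toy_* names are invented, and RW_LOCK_BIAS is assumed to be the 0x01000000 value asm/rwlock.h defined at the time.

#include <stdbool.h>

#define RW_LOCK_BIAS 0x01000000

typedef struct { volatile int lock; } toy_rwlock_t;	/* starts at RW_LOCK_BIAS */

/* Mirrors __raw_read_trylock: take one reader slot, undo on failure. */
static inline bool toy_read_trylock(toy_rwlock_t *rw)
{
	if (__atomic_sub_fetch(&rw->lock, 1, __ATOMIC_ACQUIRE) >= 0)
		return true;
	__atomic_add_fetch(&rw->lock, 1, __ATOMIC_RELAXED);
	return false;
}

/* Mirrors __raw_write_trylock: claim the entire bias; landing exactly
 * on zero proves there were no readers and no other writer. */
static inline bool toy_write_trylock(toy_rwlock_t *rw)
{
	if (__atomic_sub_fetch(&rw->lock, RW_LOCK_BIAS, __ATOMIC_ACQUIRE) == 0)
		return true;
	__atomic_add_fetch(&rw->lock, RW_LOCK_BIAS, __ATOMIC_RELAXED);
	return false;
}

The can_lock predicates fall out of the same encoding: a positive count means a read slot is free, and a count equal to the full bias means nobody holds the lock at all.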
include/asm-x86/spinlock_64.h  +24 −13
@@ -33,14 +33,21 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
		"cmpl $0,%0\n\t"
		"jle 3b\n\t"
		"jmp 1b\n"
		"2:\t" : "=m" (lock->slock) : : "memory");
		"2:\t"
		: "=m" (lock->slock) : : "memory");
}

/*
 * Same as __raw_spin_lock, but reenable interrupts during spinning.
+ * It is easier for the lock validator if interrupts are not re-enabled
+ * in the middle of a lock-acquire. This is a performance feature anyway
+ * so we turn it off:
+ *
+ * NOTE: there's an irqs-on section here, which normally would have to be
+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
 */
#ifndef CONFIG_PROVE_LOCKING
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+					 unsigned long flags)
{
	asm volatile(
		"\n1:\t"
@@ -48,12 +55,12 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
		"jns 5f\n"
		"testl $0x200, %1\n\t"	/* interrupts were disabled? */
		"jz 4f\n\t"
	        "sti\n"
		STI_STRING "\n"
		"3:\t"
		"rep;nop\n\t"
		"cmpl $0, %0\n\t"
		"jle 3b\n\t"
		"cli\n\t"
		CLI_STRING "\n\t"
		"jmp 1b\n"
		"4:\t"
		"rep;nop\n\t"
@@ -61,7 +68,9 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
		"jg 1b\n\t"
		"jmp 4b\n"
		"5:\n\t"
		: "+m" (lock->slock) : "r" ((unsigned)flags) : "memory");
		: "+m" (lock->slock)
		: "r" ((unsigned)flags) CLI_STI_INPUT_ARGS
		: "memory" CLI_STI_CLOBBERS);
}
#endif

@@ -116,7 +125,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
}
@@ -125,7 +134,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "\tcall __write_lock_failed\n\t"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
}
@@ -133,6 +142,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
+
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
@@ -143,6 +153,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
+
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
@@ -151,12 +162,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
	asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",%0"
				: "=m" (rw->lock) : : "memory");
}