Commit fbdc8f0f authored by Helge Deller

parisc: Rework arch_rw locking functions



Clean up the arch read/write locking functions based on the arc
implementation. This improves readability of those functions.

Signed-off-by: Helge Deller <deller@gmx.de>
parent 2772f0ef
arch/parisc/include/asm/spinlock.h  +56 −79
@@ -67,116 +67,93 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
- * Linux rwlocks are unfair to writers; they can be starved for an indefinite
- * time by readers.  With care, they can also be taken in interrupt context.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
  *
- * In the PA-RISC implementation, we have a spinlock and a counter.
- * Readers use the lock to serialise their access to the counter (which
- * records how many readers currently hold the lock).
- * Writers hold the spinlock, preventing any readers or other writers from
- * grabbing the rwlock.
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
  */
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static  __inline__ void arch_read_lock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
+	int ret = 0;
 	unsigned long flags;
-	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter++;
-	arch_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static  __inline__ void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned long flags;
 	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter--;
-	arch_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
+	arch_spin_lock(&(rw->lock_mutex));
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned long flags;
- retry:
-	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		rw->counter++;
-		arch_spin_unlock(&rw->lock);
-		local_irq_restore(flags);
-		return 1;
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
+	 */
+	if (rw->counter > 0) {
+		rw->counter--;
+		ret = 1;
 	}
 
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
-	/* If write-locked, we fail to acquire the lock */
-	if (rw->counter < 0)
-		return 0;
 
-	/* Wait until we have a realistic chance at the lock */
-	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
-		cpu_relax();
-
-	goto retry;
+	return ret;
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void arch_write_lock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
+	int ret = 0;
 	unsigned long flags;
-retry:
+
 	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
+	arch_spin_lock(&(rw->lock_mutex));
 
-	if (rw->counter != 0) {
-		arch_spin_unlock(&rw->lock);
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 */
+	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+		rw->counter = 0;
+		ret = 1;
+	}
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
 
-		while (rw->counter != 0)
-			cpu_relax();
-
-		goto retry;
+	return ret;
 }
 
-	rw->counter = -1; /* mark as write-locked */
-	mb();
-	local_irq_restore(flags);
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	while (!arch_read_trylock(rw))
+		cpu_relax();
 }
 
-static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	rw->counter = 0;
-	arch_spin_unlock(&rw->lock);
+	while (!arch_write_trylock(rw))
+		cpu_relax();
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
-	int result = 0;
 
 	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		if (rw->counter == 0) {
-			rw->counter = -1;
-			result = 1;
-		} else {
-			/* Read-locked.  Oh well. */
-			arch_spin_unlock(&rw->lock);
-		}
-	}
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter++;
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
+}
 
-	return result;
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
 }
 
 #endif /* __ASM_SPINLOCK_H */
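
The rework above funnels all four lock/unlock entry points through the two trylock primitives. A minimal user-space model of the same logic (a sketch, not kernel code: hypothetical names, a plain unsigned int in place of the counter, and the lock_mutex/IRQ handling omitted since it only serializes access to that counter) illustrates the state transitions:

#include <assert.h>

#define RW_UNLOCKED 0x01000000	/* mirrors __ARCH_RW_LOCK_UNLOCKED__ */

static unsigned int counter = RW_UNLOCKED;

static int model_read_trylock(void)
{
	if (counter > 0) {	/* zero means a writer holds it exclusively */
		counter--;	/* each reader takes one slot */
		return 1;
	}
	return 0;
}

static int model_write_trylock(void)
{
	if (counter == RW_UNLOCKED) {	/* only when no readers or writer */
		counter = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	assert(model_read_trylock());	/* 1st reader: counter = 0x00FFFFFF */
	assert(model_read_trylock());	/* 2nd reader coexists */
	assert(!model_write_trylock());	/* writer denied while readers hold it */
	counter += 2;			/* both readers unlock */
	assert(model_write_trylock());	/* writer succeeds, counter = 0 */
	assert(!model_read_trylock());	/* readers denied under the writer */
	return 0;
}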
arch/parisc/include/asm/spinlock_types.h  +11 −3
@@ -12,11 +12,19 @@ typedef struct {
 #endif
 } arch_spinlock_t;
 
+
+/* counter:
+ * Unlocked     : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01  (Multiple Readers decrement it)
+ * Write lock   : 0x0, but only if prior value is "unlocked" 0x0100_0000
+ */
 typedef struct {
-	arch_spinlock_t lock;
-	volatile int counter;
+	arch_spinlock_t		lock_mutex;
+	volatile unsigned int	counter;
 } arch_rwlock_t;
 
-#define __ARCH_RW_LOCK_UNLOCKED		{ __ARCH_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED__       0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED         { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
+					.counter = __ARCH_RW_LOCK_UNLOCKED__ }
 
 #endif
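
With the counter encoding documented in the comment above, the whole lock state lives in one value: N active readers leave the counter at 0x0100_0000 - N, so anything in the range 0x01 to 0x00FF_FFFF is a read-locked state and 0x0 is uniquely the write-locked state. A small sketch (assumed semantics, values taken from the header comment) checks that arithmetic:

#include <assert.h>

#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000

int main(void)
{
	unsigned int counter = __ARCH_RW_LOCK_UNLOCKED__;

	counter -= 3;			/* three readers hold the lock */
	assert(counter == 0x00FFFFFD);	/* still in the read-locked range */

	/* A writer may only claim the lock from the fully unlocked value,
	 * so up to 0x00FFFFFF concurrent readers are representable before
	 * the counter could collide with the write-locked value 0. */
	assert(counter > 0 && counter < __ARCH_RW_LOCK_UNLOCKED__);
	return 0;
}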