Commit 84c65911 authored by Will Deacon, committed by Ingo Molnar

locking/atomics, asm-generic/bitops/lock.h: Rewrite using atomic_fetch_*()



The lock bitops can be implemented more efficiently using the atomic_fetch_*()
ops, which provide finer-grained control over the memory ordering semantics
than the bitops.
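
To make the pairing concrete, here is a minimal sketch of a bit lock built on
the two primitives this patch touches (hypothetical example_* helpers, not
part of the patch):

  /*
   * Illustrative only: a successful test_and_set_bit_lock() has acquire
   * semantics and clear_bit_unlock() has release semantics, so the
   * critical section is fully bracketed without any explicit barriers.
   */
  static inline void example_bit_lock(unsigned int nr, volatile unsigned long *word)
  {
  	while (test_and_set_bit_lock(nr, word))
  		cpu_relax();	/* bit was already set: spin */
  }

  static inline void example_bit_unlock(unsigned int nr, volatile unsigned long *word)
  {
  	clear_bit_unlock(nr, word);
  }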

Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arm-kernel@lists.infradead.org
Cc: yamada.masahiro@socionext.com
Link: https://lore.kernel.org/lkml/1529412794-17720-8-git-send-email-will.deacon@arm.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e986a0d6
include/asm-generic/bitops/lock.h: 56 additions, 12 deletions

--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -2,6 +2,10 @@
 #ifndef _ASM_GENERIC_BITOPS_LOCK_H_
 #define _ASM_GENERIC_BITOPS_LOCK_H_
 
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
 /**
  * test_and_set_bit_lock - Set a bit and return its old value, for lock
  * @nr: Bit to set
@@ -11,7 +15,20 @@
  * the returned value is 0.
  * It can be used to implement bit locks.
  */
-#define test_and_set_bit_lock(nr, addr)	test_and_set_bit(nr, addr)
+static inline int test_and_set_bit_lock(unsigned int nr,
+					volatile unsigned long *p)
+{
+	long old;
+	unsigned long mask = BIT_MASK(nr);
+
+	p += BIT_WORD(nr);
+	if (READ_ONCE(*p) & mask)
+		return 1;
+
+	old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+	return !!(old & mask);
+}
+
 
 /**
  * clear_bit_unlock - Clear a bit in memory, for unlock
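
Note the READ_ONCE() fast path in the new test_and_set_bit_lock(): a contended
caller fails without performing (or paying the ordering cost of) the atomic
RMW, which is fine because a failed lock attempt needs no ordering. A sketch
of the guarantee the acquire/release pair provides (hypothetical writer/reader,
not part of the patch):

  /* Illustration only, not part of this patch. */
  static unsigned long guard;	/* bit 0 serves as the lock bit */
  static int payload;

  static void writer(void)
  {
  	while (test_and_set_bit_lock(0, &guard))
  		cpu_relax();
  	payload = 42;			/* cannot be hoisted above the acquire */
  	clear_bit_unlock(0, &guard);	/* cannot sink below the release */
  }

  static void reader(void)
  {
  	while (test_and_set_bit_lock(0, &guard))
  		cpu_relax();
  	/* If writer() has unlocked by now, payload == 42 is guaranteed
  	 * to be visible here: its release pairs with our acquire. */
  	clear_bit_unlock(0, &guard);
  }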
@@ -20,11 +37,11 @@
  *
  * This operation is atomic and provides release barrier semantics.
  */
-#define clear_bit_unlock(nr, addr)	\
-do {					\
-	smp_mb__before_atomic();	\
-	clear_bit(nr, addr);		\
-} while (0)
+static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+{
+	p += BIT_WORD(nr);
+	atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+}
 
 /**
  * __clear_bit_unlock - Clear a bit in memory, for unlock
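
The clear_bit_unlock() rewrite above replaces a full smp_mb() (via
smp_mb__before_atomic()) plus an unordered RMW with a single release RMW,
which is sufficient for an unlock and typically cheaper on weakly ordered
architectures such as arm64. A side-by-side sketch (assumed helper names,
not part of the patch):

  /* Illustrative comparison only, not part of this patch. */
  static inline void unlock_old_style(unsigned int nr, volatile unsigned long *p)
  {
  	smp_mb__before_atomic();	/* full barrier on weakly ordered arches */
  	clear_bit(nr, p);		/* relaxed atomic RMW */
  }

  static inline void unlock_new_style(unsigned int nr, volatile unsigned long *p)
  {
  	p += BIT_WORD(nr);
  	/* one release RMW: prior accesses cannot move below the unlock */
  	atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
  }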
@@ -37,11 +54,38 @@ do { \
  *
  * See for example x86's implementation.
  */
-#define __clear_bit_unlock(nr, addr)	\
-do {					\
-	smp_mb__before_atomic();	\
-	clear_bit(nr, addr);		\
-} while (0)
+static inline void __clear_bit_unlock(unsigned int nr,
+				      volatile unsigned long *p)
+{
+	unsigned long old;
 
-#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
+	p += BIT_WORD(nr);
+	old = READ_ONCE(*p);
+	old &= ~BIT_MASK(nr);
+	atomic_long_set_release((atomic_long_t *)p, old);
+}
+
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ *                                     byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters.
+ */
+#ifndef clear_bit_unlock_is_negative_byte
+static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
+						     volatile unsigned long *p)
+{
+	long old;
+	unsigned long mask = BIT_MASK(nr);
+
+	p += BIT_WORD(nr);
+	old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+	return !!(old & BIT(7));
+}
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
+
 #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
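
For reference, the filemap usage that clear_bit_unlock_is_negative_byte()
exists for looks roughly like this (paraphrased from mm/filemap.c of the same
era, not introduced by this patch):

  /* Paraphrased illustration: PG_waiters is bit 7, i.e. the sign bit of
   * the byte containing PG_locked, so one release RMW both drops the page
   * lock and reports whether anybody is waiting for it. */
  void unlock_page(struct page *page)
  {
  	BUILD_BUG_ON(PG_waiters != 7);
  	page = compound_head(page);
  	VM_BUG_ON_PAGE(!PageLocked(page), page);
  	if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
  		wake_up_page_bit(page, PG_locked);
  }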