Commit 8bf705d1 authored by Dmitry Vyukov, committed by Ingo Molnar

locking/atomic/x86: Switch atomic.h to use atomic-instrumented.h



Add the arch_ prefix to all atomic operations and include
<asm-generic/atomic-instrumented.h>. This will allow KASAN
instrumentation to be added to all atomic ops.
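
For reference, <asm-generic/atomic-instrumented.h> defines the un-prefixed
API as thin wrappers that perform a KASAN check and then call the
arch_-prefixed implementation. A minimal sketch of the pattern (assuming the
kasan_check_read()/kasan_check_write() helpers from <linux/kasan-checks.h>;
not the verbatim header):

	/* Un-prefixed ops check the access with KASAN, then defer to the
	 * arch_ implementation provided by <asm/atomic.h>. */
	static __always_inline int atomic_read(const atomic_t *v)
	{
		kasan_check_read(v, sizeof(*v));
		return arch_atomic_read(v);
	}

	static __always_inline void atomic_set(atomic_t *v, int i)
	{
		kasan_check_write(v, sizeof(*v));
		arch_atomic_set(v, i);
	}

Callers keep using atomic_read()/atomic_set() unchanged; only the x86 header
now supplies the arch_ variants that these wrappers invoke.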

Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: kasan-dev@googlegroups.com
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/54f0eb64260b84199e538652e079a89b5423ad41.1517246437.git.dvyukov@google.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b06ed71a
arch/x86/include/asm/atomic.h  +52 −50
@@ -17,36 +17,36 @@
#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int atomic_read(const atomic_t *v)
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/**
 * atomic_set - set atomic variable
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void atomic_set(atomic_t *v, int i)
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

/**
 * atomic_add - add integer to atomic variable
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void atomic_add(int i, atomic_t *v)
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
@@ -54,13 +54,13 @@ static __always_inline void atomic_add(int i, atomic_t *v)
}

/**
 * atomic_sub - subtract integer from atomic variable
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void atomic_sub(int i, atomic_t *v)
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
@@ -68,7 +68,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
@@ -76,63 +76,63 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
}

/**
 * atomic_inc - increment atomic variable
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void atomic_inc(atomic_t *v)
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void atomic_dec(atomic_t *v)
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool atomic_dec_and_test(atomic_t *v)
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
}

/**
 * atomic_inc_and_test - increment and test
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool atomic_inc_and_test(atomic_t *v)
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
}

/**
 * atomic_add_negative - add and test if negative
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
@@ -140,65 +140,65 @@ static __always_inline bool atomic_inc_and_test(atomic_t *v)
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __always_inline bool atomic_add_negative(int i, atomic_t *v)
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
}

/**
 * atomic_add_return - add integer and return
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline int atomic_add_return(int i, atomic_t *v)
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}

/**
 * atomic_sub_return - subtract integer and return
 * arch_atomic_sub_return - subtract integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to subtract
 *
 * Atomically subtracts @i from @v and returns @v - @i
 */
static __always_inline int atomic_sub_return(int i, atomic_t *v)
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
	return arch_atomic_add_return(-i, v);
}

#define atomic_inc_return(v)  (atomic_add_return(1, v))
#define atomic_dec_return(v)  (atomic_sub_return(1, v))
#define arch_atomic_inc_return(v)  (arch_atomic_add_return(1, v))
#define arch_atomic_dec_return(v)  (arch_atomic_sub_return(1, v))

static __always_inline int atomic_fetch_add(int i, atomic_t *v)
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}

static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}

static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return cmpxchg(&v->counter, old, new);
	return arch_cmpxchg(&v->counter, old, new);
}

#define atomic_try_cmpxchg atomic_try_cmpxchg
static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return try_cmpxchg(&v->counter, old, new);
}

static inline int atomic_xchg(atomic_t *v, int new)
static inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return xchg(&v->counter, new);
}

static inline void atomic_and(int i, atomic_t *v)
static inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
			: "+m" (v->counter)
@@ -206,16 +206,16 @@ static inline void atomic_and(int i, atomic_t *v)
			: "memory");
}

static inline int atomic_fetch_and(int i, atomic_t *v)
static inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = atomic_read(v);
	int val = arch_atomic_read(v);

	do { } while (!atomic_try_cmpxchg(v, &val, val & i));
	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}

static inline void atomic_or(int i, atomic_t *v)
static inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
			: "+m" (v->counter)
@@ -223,16 +223,16 @@ static inline void atomic_or(int i, atomic_t *v)
			: "memory");
}

static inline int atomic_fetch_or(int i, atomic_t *v)
static inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = atomic_read(v);
	int val = arch_atomic_read(v);

	do { } while (!atomic_try_cmpxchg(v, &val, val | i));
	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}

static inline void atomic_xor(int i, atomic_t *v)
static inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
			: "+m" (v->counter)
@@ -240,17 +240,17 @@ static inline void atomic_xor(int i, atomic_t *v)
			: "memory");
}

static inline int atomic_fetch_xor(int i, atomic_t *v)
static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = atomic_read(v);
	int val = arch_atomic_read(v);

	do { } while (!atomic_try_cmpxchg(v, &val, val ^ i));
	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}

/**
 * __atomic_add_unless - add unless the number is already a given value
 * __arch_atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
@@ -258,14 +258,14 @@ static inline int atomic_fetch_xor(int i, atomic_t *v)
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);
	int c = arch_atomic_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!atomic_try_cmpxchg(v, &c, c + a));
	} while (!arch_atomic_try_cmpxchg(v, &c, c + a));

	return c;
}
@@ -276,4 +276,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
# include <asm/atomic64_64.h>
#endif

#include <asm-generic/atomic-instrumented.h>

#endif /* _ASM_X86_ATOMIC_H */
arch/x86/include/asm/atomic64_32.h  +54 −52
@@ -62,7 +62,7 @@ ATOMIC64_DECL(add_unless);
#undef ATOMIC64_EXPORT

/**
 * atomic64_cmpxchg - cmpxchg atomic64 variable
 * arch_atomic64_cmpxchg - cmpxchg atomic64 variable
 * @v: pointer to type atomic64_t
 * @o: expected value
 * @n: new value
@@ -71,20 +71,21 @@ ATOMIC64_DECL(add_unless);
 * the old value.
 */

static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
static inline long long arch_atomic64_cmpxchg(atomic64_t *v, long long o,
					      long long n)
{
	return cmpxchg64(&v->counter, o, n);
	return arch_cmpxchg64(&v->counter, o, n);
}

/**
 * atomic64_xchg - xchg atomic64 variable
 * arch_atomic64_xchg - xchg atomic64 variable
 * @v: pointer to type atomic64_t
 * @n: value to assign
 *
 * Atomically xchgs the value of @v to @n and returns
 * the old value.
 */
static inline long long atomic64_xchg(atomic64_t *v, long long n)
static inline long long arch_atomic64_xchg(atomic64_t *v, long long n)
{
	long long o;
	unsigned high = (unsigned)(n >> 32);
@@ -96,13 +97,13 @@ static inline long long atomic64_xchg(atomic64_t *v, long long n)
}

/**
 * atomic64_set - set atomic64 variable
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: value to assign
 *
 * Atomically sets the value of @v to @n.
 */
static inline void atomic64_set(atomic64_t *v, long long i)
static inline void arch_atomic64_set(atomic64_t *v, long long i)
{
	unsigned high = (unsigned)(i >> 32);
	unsigned low = (unsigned)i;
@@ -112,12 +113,12 @@ static inline void atomic64_set(atomic64_t *v, long long i)
}

/**
 * atomic64_read - read atomic64 variable
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically reads the value of @v and returns it.
 */
static inline long long atomic64_read(const atomic64_t *v)
static inline long long arch_atomic64_read(const atomic64_t *v)
{
	long long r;
	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
@@ -125,13 +126,13 @@ static inline long long atomic64_read(const atomic64_t *v)
 }

/**
 * atomic64_add_return - add and return
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + *@v
 */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
static inline long long arch_atomic64_add_return(long long i, atomic64_t *v)
{
	alternative_atomic64(add_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
@@ -142,7 +143,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
/*
 * Other variants with different arithmetic operators:
 */
static inline long long atomic64_sub_return(long long i, atomic64_t *v)
static inline long long arch_atomic64_sub_return(long long i, atomic64_t *v)
{
	alternative_atomic64(sub_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
@@ -150,7 +151,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
	return i;
}

static inline long long atomic64_inc_return(atomic64_t *v)
static inline long long arch_atomic64_inc_return(atomic64_t *v)
{
	long long a;
	alternative_atomic64(inc_return, "=&A" (a),
@@ -158,7 +159,7 @@ static inline long long atomic64_inc_return(atomic64_t *v)
	return a;
}

static inline long long atomic64_dec_return(atomic64_t *v)
static inline long long arch_atomic64_dec_return(atomic64_t *v)
{
	long long a;
	alternative_atomic64(dec_return, "=&A" (a),
@@ -167,13 +168,13 @@ static inline long long atomic64_dec_return(atomic64_t *v)
}

/**
 * atomic64_add - add integer to atomic64 variable
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline long long atomic64_add(long long i, atomic64_t *v)
static inline long long arch_atomic64_add(long long i, atomic64_t *v)
{
	__alternative_atomic64(add, add_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
@@ -182,13 +183,13 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
}

/**
 * atomic64_sub - subtract the atomic64 variable
 * arch_atomic64_sub - subtract the atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline long long atomic64_sub(long long i, atomic64_t *v)
static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
{
	__alternative_atomic64(sub, sub_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
@@ -197,7 +198,7 @@ static inline long long atomic64_sub(long long i, atomic64_t *v)
}

/**
 * atomic64_sub_and_test - subtract value from variable and test result
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
@@ -205,46 +206,46 @@ static inline long long atomic64_sub(long long i, atomic64_t *v)
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
static inline int arch_atomic64_sub_and_test(long long i, atomic64_t *v)
{
	return atomic64_sub_return(i, v) == 0;
	return arch_atomic64_sub_return(i, v) == 0;
}

/**
 * atomic64_inc - increment atomic64 variable
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static inline void atomic64_inc(atomic64_t *v)
static inline void arch_atomic64_inc(atomic64_t *v)
{
	__alternative_atomic64(inc, inc_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}

/**
 * atomic64_dec - decrement atomic64 variable
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static inline void atomic64_dec(atomic64_t *v)
static inline void arch_atomic64_dec(atomic64_t *v)
{
	__alternative_atomic64(dec, dec_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}

/**
 * atomic64_dec_and_test - decrement and test
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int atomic64_dec_and_test(atomic64_t *v)
static inline int arch_atomic64_dec_and_test(atomic64_t *v)
{
	return atomic64_dec_return(v) == 0;
	return arch_atomic64_dec_return(v) == 0;
}

/**
@@ -255,13 +256,13 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic64_inc_and_test(atomic64_t *v)
static inline int arch_atomic64_inc_and_test(atomic64_t *v)
{
	return atomic64_inc_return(v) == 0;
	return arch_atomic64_inc_return(v) == 0;
}

/**
 * atomic64_add_negative - add and test if negative
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
@@ -269,13 +270,13 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int atomic64_add_negative(long long i, atomic64_t *v)
static inline int arch_atomic64_add_negative(long long i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
	return arch_atomic64_add_return(i, v) < 0;
}

/**
 * atomic64_add_unless - add unless the number is a given value
 * arch_atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
@@ -283,7 +284,8 @@ static inline int atomic64_add_negative(long long i, atomic64_t *v)
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was done, zero otherwise.
 */
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
					   long long u)
{
	unsigned low = (unsigned)u;
	unsigned high = (unsigned)(u >> 32);
@@ -294,7 +296,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
}


static inline int atomic64_inc_not_zero(atomic64_t *v)
static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	int r;
	alternative_atomic64(inc_not_zero, "=&a" (r),
@@ -302,7 +304,7 @@ static inline int atomic64_inc_not_zero(atomic64_t *v)
	return r;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
{
	long long r;
	alternative_atomic64(dec_if_positive, "=&A" (r),
@@ -313,70 +315,70 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#undef alternative_atomic64
#undef __alternative_atomic64

static inline void atomic64_and(long long i, atomic64_t *v)
static inline void arch_atomic64_and(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
		c = old;
}

static inline long long atomic64_fetch_and(long long i, atomic64_t *v)
static inline long long arch_atomic64_fetch_and(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
		c = old;

	return old;
}

static inline void atomic64_or(long long i, atomic64_t *v)
static inline void arch_atomic64_or(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;
}

static inline long long atomic64_fetch_or(long long i, atomic64_t *v)
static inline long long arch_atomic64_fetch_or(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;

	return old;
}

static inline void atomic64_xor(long long i, atomic64_t *v)
static inline void arch_atomic64_xor(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c)
	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
		c = old;
}

static inline long long atomic64_fetch_xor(long long i, atomic64_t *v)
static inline long long arch_atomic64_fetch_xor(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c)
	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
		c = old;

	return old;
}

static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
static inline long long arch_atomic64_fetch_add(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = atomic64_cmpxchg(v, c, c + i)) != c)
	while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c)
		c = old;

	return old;
}

#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
#define arch_atomic64_fetch_sub(i, v)	arch_atomic64_fetch_add(-(i), (v))

#endif /* _ASM_X86_ATOMIC64_32_H */
+54 −54 — file changed; diff collapsed (preview size limit exceeded)
arch/x86/include/asm/cmpxchg.h  +6 −6
@@ -145,13 +145,13 @@ extern void __add_wrong_size(void)
# include <asm/cmpxchg_64.h>
#endif

#define cmpxchg(ptr, old, new)						\
#define arch_cmpxchg(ptr, old, new)					\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
#define arch_sync_cmpxchg(ptr, old, new)				\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
#define arch_cmpxchg_local(ptr, old, new)				\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))


@@ -250,10 +250,10 @@ extern void __add_wrong_size(void)
	__ret;								\
})

#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
#define arch_cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)

#endif	/* ASM_X86_CMPXCHG_H */
arch/x86/include/asm/cmpxchg_32.h  +4 −4
@@ -36,10 +36,10 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
}

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
#define arch_cmpxchg64(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
#define arch_cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif
@@ -76,7 +76,7 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 * to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */

#define cmpxchg64(ptr, o, n)					\
#define arch_cmpxchg64(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
@@ -93,7 +93,7 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
	__ret; })


#define cmpxchg64_local(ptr, o, n)				\
#define arch_cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\