#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H
#include <linux/compiler.h>
#include <linux/types.h> /* u8/u16/u32/u64 used by the helpers below */
#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
/*
* Non-existent functions to indicate usage errors at link time
* (or compile-time if the compiler implements __compiletime_error()).
*/
extern void __xchg_wrong_size(void)
__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
__compiletime_error("Bad argument size for add");
/*
* Constants for operation sizes. On 32-bit, the 64-bit size is set to
* -1 because sizeof will never return -1, thereby making those switch
* case statements guaranteed dead code which the compiler will
* eliminate, and allowing the "missing symbol in the default case" to
* indicate a usage error.
*/
#define __X86_CASE_B 1
#define __X86_CASE_W 2
#define __X86_CASE_L 4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q 8
#else
#define __X86_CASE_Q -1 /* sizeof will never return -1 */
#endif
/*
* An exchange-type operation, which takes a value and a pointer, and
* returns the old value.
*/
#define __xchg_op(ptr, arg, op, lock) \
({ \
__typeof__ (*(ptr)) __ret = (arg); \
switch (sizeof(*(ptr))) { \
case __X86_CASE_B: \
asm volatile (lock #op "b %b0, %1\n" \
: "+q" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
case __X86_CASE_W: \
asm volatile (lock #op "w %w0, %1\n" \
: "+r" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
case __X86_CASE_L: \
asm volatile (lock #op "l %0, %1\n" \
: "+r" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
case __X86_CASE_Q: \
asm volatile (lock #op "q %q0, %1\n" \
: "+r" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
default: \
__ ## op ## _wrong_size(); \
} \
__ret; \
})
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
* Since this is generally used to protect other memory information, we
* use "asm volatile" and "memory" clobbers to prevent gcc from moving
* information around.
*/
#define xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
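/*
 * Illustrative use (not part of this header): xchg() stores the new value and
 * returns whatever was in memory before, which makes "take and clear" handoffs
 * a one-liner. The struct and helper below are hypothetical:
 *
 *	struct work *take_pending(struct work **slot)
 *	{
 *		// *slot is set to NULL; the previous pointer is returned atomically
 *		return xchg(slot, NULL);
 *	}
 */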
#ifdef CONFIG_LOCKDOC_CMPXCHG
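/*
 * With CONFIG_LOCKDOC_CMPXCHG the size dispatch below delegates to per-size
 * helper functions (one family per lock prefix: "lock; ", LOCK_PREFIX, none)
 * instead of open-coding the asm in each macro expansion. The generated
 * instructions are the same; the separate helpers presumably exist so that
 * instrumentation can attribute each cmpxchg flavour to a dedicated function.
 */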
static inline u8 __raw_cmpxchg_lock_b(u8 *ptr, u8 old, u8 new)
{
u8 __ret;
volatile u8 *__ptr = (volatile u8 *)(ptr);
asm volatile("lock; " "cmpxchgb %2,%1"
: "=a" (__ret), "+m" (*__ptr)
: "q" (new), "0" (old)
: "memory");
return __ret;
}
static inline u16 __raw_cmpxchg_lock_w(u16 *ptr, u16 old, u16 new)
{
u16 __ret;
volatile u16 *__ptr = (volatile u16 *)(ptr);
asm volatile("lock; " "cmpxchgw %2,%1"
: "=a" (__ret), "+m" (*__ptr)
: "r" (new), "0" (old)
: "memory");
return __ret;
}
static inline u32 __raw_cmpxchg_lock_l(u32 *ptr, u32 old, u32 new)
{
u32 __ret;
volatile u32 *__ptr = (volatile u32 *)(ptr);
asm volatile("lock; " "cmpxchgl %2,%1"
: "=a" (__ret), "+m" (*__ptr)
: "r" (new), "0" (old)
: "memory");
return __ret;
}
static inline u64 __raw_cmpxchg_lock_q(u64 *ptr, u64 old, u64 new)
{
#ifdef CONFIG_64BIT
u64 __ret;
volatile u64 *__ptr = (volatile u64 *)(ptr);
asm volatile("lock; " "cmpxchgq %2,%1"
: "=a" (__ret), "+m" (*__ptr)
: "r" (new), "0" (old)
: "memory");
return __ret;
#else
panic("%s should not be called!\n", __func__);
#endif
}
#define __raw_cmpxchg_lock(ptr, old, new, size) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
switch (size) { \
case __X86_CASE_B: \
{ \
u8 *__ptr = (u8 *)(ptr); \
__ret = (__typeof__(*(ptr)))__raw_cmpxchg_lock_b(__ptr, (u8)__old, (u8)__new); \
break; \
} \
case __X86_CASE_W: \
{ \
u16 *__ptr = (u16 *)(ptr); \
__ret = (__typeof__(*(ptr)))__raw_cmpxchg_lock_w(__ptr, (u16)__old, (u16)__new); \
break; \
} \
case __X86_CASE_L: \
{ \
u32 *__ptr = (u32 *)(ptr); \
__ret = (__typeof__(*(ptr)))__raw_cmpxchg_lock_l(__ptr, (u32)__old, (u32)__new); \
break; \
} \
case __X86_CASE_Q: \
{ \
u64 *__ptr = (u64 *)(ptr); \
__ret = (__typeof__(*(ptr)))__raw_cmpxchg_lock_q(__ptr, (u64)__old, (u64)__new); \
break; \
} \
default: \
__cmpxchg_wrong_size(); \
} \
__ret; \
})
static inline u8 __raw_cmpxchg_lock_prefix_b(u8 *ptr, u8 old, u8 new)
{
u8 __ret;
volatile u8 *__ptr = (volatile u8 *)(ptr);
asm volatile(LOCK_PREFIX "cmpxchgb %2,%1"
: "=a" (__ret), "+m" (*__ptr)
: "q" (new), "0" (old)
: "memory");
return __ret;
}
static inline u16 __raw_cmpxchg_lock_prefix_w(u16 *ptr, u16 old, u16 new)
{
u16 __ret;
volatile u16 *__ptr = (volatile u16 *)(ptr);
asm volatile(LOCK_PREFIX "cmpxchgw %2,%1"
: "=a" (__ret), "+m" (*__ptr)
: "r" (new), "0" (old)
: "memory");
return __ret;
}
static inline u32 __raw_cmpxchg_lock_prefix_l(u32 *ptr, u32 old, u32 new)
{
u32 __ret;
volatile u32 *__ptr = (volatile u32 *)(ptr);
asm volatile(LOCK_PREFIX "cmpxchgl %2,%1"
: "=a" (__ret), "+m" (*__ptr)
: "r" (new), "0" (old)
: "memory");
return __ret;
}
static inline u64 __raw_cmpxchg_lock_prefix_q(u64 *ptr, u64 old, u64 new)
{
#ifdef CONFIG_64BIT
u64 __ret;
volatile u64 *__ptr = (volatile u64 *)(ptr);
asm volatile(LOCK_PREFIX "cmpxchgq %2,%1"
: "=a" (__ret), "+m" (*__ptr)
: "r" (new), "0" (old)
: "memory");
return __ret;
#else
panic("%s should not be called!\n", __func__);
#endif
}
#define __raw_cmpxchg_lock_prefix(ptr, old, new, size) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
switch (size) { \
case __X86_CASE_B: \
{ \
u8 *__ptr = (u8 *)(ptr); \
__ret = (__typeof__(*(ptr)))__raw_cmpxchg_lock_prefix_b(__ptr, (u8)__old, (u8)__new); \
break; \
} \
case __X86_CASE_W: \
{ \
u16 *__ptr = (u16 *)(ptr); \
__ret = (__typeof__(*(ptr)))__raw_cmpxchg_lock_prefix_w(__ptr, (u16)__old, (u16)__new); \
break; \
} \
case __X86_CASE_L: \
{ \
u32 *__ptr = (u32 *)(ptr); \
__ret = (__typeof__(*(ptr)))__raw_cmpxchg_lock_prefix_l(__ptr, (u32)__old, (u32)__new); \
break; \
} \
case __X86_CASE_Q: \
{ \
u64 *__ptr = (u64 *)(ptr); \
__ret = (__typeof__(*(ptr)))__raw_cmpxchg_lock_prefix_q(__ptr, (u64)__old, (u64)__new); \
break; \
} \
default: \
__cmpxchg_wrong_size(); \
} \
__ret; \
})
static inline u8 __raw_cmpxchg_none_b(u8 *ptr, u8 old, u8 new)
{
u8 __ret;
volatile u8 *__ptr = (volatile u8 *)(ptr);
asm volatile("cmpxchgb %2,%1"
: "=a" (__ret), "+m" (*__ptr)
: "q" (new), "0" (old)
: "memory");
return __ret;
}
static inline u16 __raw_cmpxchg_none_w(u16 *ptr, u16 old, u16 new)
{
u16 __ret;
volatile u16 *__ptr = (volatile u16 *)(ptr);
asm volatile("cmpxchgw %2,%1"
: "=a" (__ret), "+m" (*__ptr)
: "r" (new), "0" (old)
: "memory");
return __ret;
}
static inline u32 __raw_cmpxchg_none_l(u32 *ptr, u32 old, u32 new)
{
u32 __ret;
volatile u32 *__ptr = (volatile u32 *)(ptr);
asm volatile("cmpxchgl %2,%1"
: "=a" (__ret), "+m" (*__ptr)
: "r" (new), "0" (old)
: "memory");
return __ret;
}
static inline u64 __raw_cmpxchg_none_q(u64 *ptr, u64 old, u64 new)
{
#ifdef CONFIG_64BIT
u64 __ret;
volatile u64 *__ptr = (volatile u64 *)(ptr);
asm volatile("cmpxchgq %2,%1"
: "=a" (__ret), "+m" (*__ptr)
: "r" (new), "0" (old)
: "memory");
return __ret;
#else
panic("%s should not be called!\n", __func__);
#endif
}
#define __raw_cmpxchg_none(ptr, old, new, size) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
switch (size) { \
case __X86_CASE_B: \
{ \
u8 *__ptr = (u8 *)(ptr); \
__ret = (__typeof__(*(ptr)))__raw_cmpxchg_none_b(__ptr, (u8)__old, (u8)__new); \
break; \
} \
case __X86_CASE_W: \
{ \
u16 *__ptr = (u16 *)(ptr); \
__ret = (__typeof__(*(ptr)))__raw_cmpxchg_none_w(__ptr, (u16)__old, (u16)__new); \
break; \
} \
case __X86_CASE_L: \
{ \
u32 *__ptr = (u32 *)(ptr); \
__ret = (__typeof__(*(ptr)))__raw_cmpxchg_none_l(__ptr, (u32)__old, (u32)__new); \
break; \
} \
case __X86_CASE_Q: \
{ \
u64 *__ptr = (u64 *)(ptr); \
__ret = (__typeof__(*(ptr)))__raw_cmpxchg_none_q(__ptr, (u64)__old, (u64)__new); \
break; \
} \
default: \
__cmpxchg_wrong_size(); \
} \
__ret; \
})
#define __cmpxchg(ptr, old, new, size) \
__raw_cmpxchg_lock_prefix((ptr), (old), (new), (size))
#define __sync_cmpxchg(ptr, old, new, size) \
__raw_cmpxchg_lock((ptr), (old), (new), (size))
#define __cmpxchg_local(ptr, old, new, size) \
__raw_cmpxchg_none((ptr), (old), (new), (size))
#else
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#define __raw_cmpxchg(ptr, old, new, size, lock) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
switch (size) { \
case __X86_CASE_B: \
{ \
volatile u8 *__ptr = (volatile u8 *)(ptr); \
asm volatile(lock "cmpxchgb %2,%1" \
: "=a" (__ret), "+m" (*__ptr) \
: "q" (__new), "0" (__old) \
: "memory"); \
break; \
} \
case __X86_CASE_W: \
{ \
volatile u16 *__ptr = (volatile u16 *)(ptr); \
asm volatile(lock "cmpxchgw %2,%1" \
: "=a" (__ret), "+m" (*__ptr) \
: "r" (__new), "0" (__old) \
: "memory"); \
break; \
} \
case __X86_CASE_L: \
{ \
volatile u32 *__ptr = (volatile u32 *)(ptr); \
asm volatile(lock "cmpxchgl %2,%1" \
: "=a" (__ret), "+m" (*__ptr) \
: "r" (__new), "0" (__old) \
: "memory"); \
break; \
} \
case __X86_CASE_Q: \
{ \
volatile u64 *__ptr = (volatile u64 *)(ptr); \
asm volatile(lock "cmpxchgq %2,%1" \
: "=a" (__ret), "+m" (*__ptr) \
: "r" (__new), "0" (__old) \
: "memory"); \
break; \
} \
default: \
__cmpxchg_wrong_size(); \
} \
__ret; \
})
#define __cmpxchg(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
#define __sync_cmpxchg(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
#define __cmpxchg_local(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), "")
#endif
#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif
#define cmpxchg(ptr, old, new) \
__cmpxchg(ptr, old, new, sizeof(*(ptr)))
#define sync_cmpxchg(ptr, old, new) \
__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))
#define cmpxchg_local(ptr, old, new) \
__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
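/*
 * Illustrative use (not part of this header): cmpxchg() only stores when the
 * current value still equals "old", so callers normally loop until either the
 * store happens or the retry condition fails. Hypothetical sketch:
 *
 *	static inline bool inc_not_zero(unsigned int *refs)
 *	{
 *		unsigned int old = *refs;
 *
 *		while (old) {
 *			unsigned int prev = cmpxchg(refs, old, old + 1);
 *
 *			if (prev == old)
 *				return true;	// our increment was stored
 *			old = prev;		// raced; retry against the new value
 *		}
 *		return false;			// counter hit zero, give up
 *	}
 *
 * sync_cmpxchg() takes the same arguments but hard-codes the lock prefix (it is
 * not subject to LOCK_PREFIX alternative patching), and cmpxchg_local() omits
 * the lock prefix entirely.
 */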
/*
* xadd() adds "inc" to "*ptr" and atomically returns the previous
* value of "*ptr".
*
* xadd() is locked when multiple CPUs are online
*/
#define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX)
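/*
 * Illustrative use (not part of this header): because xadd() hands back the
 * pre-increment value, a hypothetical ticket dispenser needs no separate load:
 *
 *	unsigned int take_ticket(unsigned int *next_ticket)
 *	{
 *		// atomically bumps *next_ticket and returns the ticket we were given
 *		return xadd(next_ticket, 1);
 *	}
 */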
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2) \
({ \
bool __ret; \
__typeof__(*(p1)) __old1 = (o1), __new1 = (n1); \
__typeof__(*(p2)) __old2 = (o2), __new2 = (n2); \
BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long))); \
VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2)); \
asm volatile(pfx "cmpxchg%c4b %2; sete %0" \
: "=a" (__ret), "+d" (__old2), \
"+m" (*(p1)), "+m" (*(p2)) \
: "i" (2 * sizeof(long)), "a" (__old1), \
"b" (__new1), "c" (__new2)); \
__ret; \
})
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)
#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
__cmpxchg_double(, p1, p2, o1, o2, n1, n2)
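/*
 * Illustrative use (not part of this header): cmpxchg_double() compares and
 * swaps two adjacent longs as a single unit. The pair must be laid out back to
 * back and aligned to 2 * sizeof(long), as the VM_BUG_ON()s above check.
 * Hypothetical sketch:
 *
 *	struct tagged_ptr {
 *		unsigned long ptr;
 *		unsigned long seq;
 *	} __aligned(2 * sizeof(unsigned long));
 *
 *	// true if (ptr, seq) still held the old values and both were replaced
 *	bool replace_tagged(struct tagged_ptr *t, unsigned long old_ptr,
 *			    unsigned long old_seq, unsigned long new_ptr)
 *	{
 *		return cmpxchg_double(&t->ptr, &t->seq,
 *				      old_ptr, old_seq,
 *				      new_ptr, old_seq + 1);
 *	}
 */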
#endif /* ASM_X86_CMPXCHG_H */