[EASTL 3.17.02] (#395)
eastl::atomic<T>

- Fixed all the spelling mistakes in the docs
- Added support for non-trivially default constructible types
- Cleaned up comments and implementation
- Improved 128-bit load code generation
- Fixed type punning to support non-trivially default constructible types
- Ensured MSVC intrinsics do not emit prefetch instructions

EASTL: to_array implementation

EASTL: fix for rbtree input iterator ctor moving elements from the source container
MaxEWinkler authored Nov 5, 2020
1 parent 7bd4505 commit 50fdd46
Showing 56 changed files with 5,048 additions and 4,314 deletions.
53 changes: 44 additions & 9 deletions include/EASTL/array.h
@@ -43,9 +43,9 @@ namespace eastl
/// Implements a templated array class as per the C++ standard TR1.
/// This class allows you to use a built-in C style array like an STL vector.
/// It does not let you change its size, as it is just like a C built-in array.
/// Our implementation here strives to remove function call nesting, as that
/// makes it hard for us to profile debug builds due to function call overhead.
/// Note that this is intentionally a struct with public data, as per the
/// C++ standard update proposal requirements.
///
/// Example usage:
@@ -75,19 +75,19 @@ namespace eastl
count = N
};

// Note that the member data is intentionally public.
// This allows for aggregate initialization of the
// object (e.g. array<int, 5> a = { 0, 3, 2, 4 }; )
value_type mValue[N ? N : 1];

public:
// We intentionally provide no constructor, destructor, or assignment operator.

void fill(const value_type& value);

// Unlike the swap function for other containers, array::swap takes linear time,
// may exit via an exception, and does not cause iterators to become associated with the other container.
void swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable<value_type>::value);

EA_CPP14_CONSTEXPR iterator begin() EA_NOEXCEPT;
EA_CPP14_CONSTEXPR const_iterator begin() const EA_NOEXCEPT;
@@ -318,7 +318,7 @@ namespace eastl


template <typename T, size_t N>
EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference
array<T, N>::front() const
{
#if EASTL_ASSERT_ENABLED
@@ -382,7 +382,7 @@ namespace eastl
#endif

EA_ANALYSIS_ASSUME(i < N);
return static_cast<const_reference>(mValue[i]);
}


@@ -479,6 +479,41 @@ namespace eastl
}


///////////////////////////////////////////////////////////////////////
// to_array
///////////////////////////////////////////////////////////////////////
namespace internal
{
template<class T, size_t N, size_t... I>
EA_CONSTEXPR auto to_array(T (&a)[N], index_sequence<I...>)
{
return eastl::array<eastl::remove_cv_t<T>, N>{{a[I]...}};
}

template<class T, size_t N, size_t... I>
EA_CONSTEXPR auto to_array(T (&&a)[N], index_sequence<I...>)
{
return eastl::array<eastl::remove_cv_t<T>, N>{{eastl::move(a[I])...}};
}
}

template<class T, size_t N>
EA_CONSTEXPR eastl::array<eastl::remove_cv_t<T>, N> to_array(T (&a)[N])
{
static_assert(eastl::is_constructible_v<T, T&>, "element type T must be copy-initializable");
static_assert(!eastl::is_array_v<T>, "passing multidimensional arrays to to_array is ill-formed");
return internal::to_array(a, eastl::make_index_sequence<N>{});
}

template<class T, size_t N>
EA_CONSTEXPR eastl::array<eastl::remove_cv_t<T>, N> to_array(T (&&a)[N])
{
static_assert(eastl::is_move_constructible_v<T>, "element type T must be move-constructible");
static_assert(!eastl::is_array_v<T>, "passing multidimensional arrays to to_array is ill-formed");
return internal::to_array(eastl::move(a), eastl::make_index_sequence<N>{});
}


} // namespace eastl


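As a side note on the new to_array overloads added above, here is a minimal usage sketch (illustrative only, not part of the commit); it assumes a C++14-or-later build so the constexpr/auto machinery in the diff is available:

#include <EASTL/array.h>
#include <EASTL/utility.h> // eastl::move

void to_array_usage_sketch()
{
    int src[] = {0, 3, 2, 4};
    eastl::array<int, 4> copied = eastl::to_array(src);              // lvalue overload, copies each element
    eastl::array<char, 6> chars = eastl::to_array("hello");          // includes the trailing '\0'
    eastl::array<int, 4> moved  = eastl::to_array(eastl::move(src)); // rvalue overload, moves each element
}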
164 changes: 80 additions & 84 deletions include/EASTL/atomic.h

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions include/EASTL/internal/atomic/arch/arm/arch_arm.h
@@ -53,12 +53,12 @@
* NOTE:
*
* On ARM32/64, we use the 'trailing sync;' convention with the stricter load acquire that uses
* a dmb instead of control dependencie + isb to ensure the IRIW litmus test is satisfied
* a dmb instead of a control dependency + isb to ensure the IRIW litmus test is satisfied
* as one reason. See EASTL/atomic.h for futher explanation and deep-dive.
*
* For ARMv8 we could move to use the new proper store release and load acquire, RCsc variant.
* All ARMv7 approaches work on ARMv8 and this code path is only used on msvc which isn't used
* heavily. Most of the ARM code will end up going thru clang or gcc since microsft arm devices
* heavily. Most of the ARM code will end up going thru clang or gcc since microsoft arm devices
* aren't that abundant.
*/

24 changes: 8 additions & 16 deletions include/EASTL/internal/atomic/arch/arm/arch_arm_load.h
@@ -29,8 +29,8 @@
*/
#if defined(EA_PROCESSOR_ARM32)

#define EASTL_ARCH_ATOMIC_MSVC_ARM32_LDREXD(ret, ptr) \
ret = __ldrexd(ptr)
#define EASTL_ARCH_ATOMIC_ARM32_LDREXD(ret, ptr) \
ret = __ldrexd((ptr))

#endif

@@ -60,7 +60,7 @@
#define EASTL_ARCH_ATOMIC_LOAD_64(type, ret, ptr) \
{ \
__int64 loadRet64; \
EASTL_ARCH_ATOMIC_MSVC_ARM32_LDREXD(loadRet64, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__int64, (ptr))); \
EASTL_ARCH_ATOMIC_ARM32_LDREXD(loadRet64, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__int64, (ptr))); \
\
ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, loadRet64); \
}
@@ -75,6 +75,7 @@

/**
* NOTE:
*
* The ARM documentation states the following:
* A 64-bit pair requires the address to be quadword aligned and is single-copy atomic for each doubleword at doubleword granularity
*
@@ -83,22 +84,13 @@
*/
#define EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, MemoryOrder) \
{ \
struct BitfieldPun128 \
{ \
__int64 value[2]; \
}; \
\
struct BitfieldPun128 loadedPun = EASTL_ATOMIC_TYPE_PUN_CAST(struct BitfieldPun128, *(ptr)); \
\
bool cmpxchgRetBool; \
ret = *(ptr); \
do \
{ \
bool cmpxchgRetBool; \
EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(struct BitfieldPun128, cmpxchgRetBool, \
EASTL_ATOMIC_TYPE_CAST(struct BitfieldPun128, (ptr)), \
&loadedPun, loadedPun); \
EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRetBool, \
ptr, &(ret), ret); \
} while (!cmpxchgRetBool); \
\
ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, loadedPun); \
}


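The new LOAD_128 body above is the usual "load via compare-exchange" idiom: read the value (possibly torn), then retry a strong CAS of that value against itself; once the CAS succeeds, the observed value is known to have been read atomically. A standalone sketch of the idea (not EASTL code), using the GCC/Clang __atomic builtins on a 16-byte-aligned __uint128_t:

// Illustrative only; assumes a target with a lock-free 128-bit compare-exchange
// (e.g. ldxp/stxp on ARM64 or cmpxchg16b on x86-64) and a 16-byte-aligned pointer.
inline __uint128_t load_128_via_cas(__uint128_t* ptr)
{
    __uint128_t observed = *ptr; // plain load, may observe shearing
    // A successful CAS of 'observed' with itself proves 'observed' was read atomically;
    // on failure the builtin refreshes 'observed' with the atomically read current value.
    while (!__atomic_compare_exchange_n(ptr, &observed, observed,
                                        /*weak=*/false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    {
    }
    return observed;
}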
@@ -34,6 +34,7 @@

/**
* NOTE:
*
* While it makes no sense for a hardware memory barrier to not imply a compiler barrier.
* MSVC docs do not explicitly state that, so better to be safe than sorry chasing down
* hard to find bugs due to the compiler deciding to reorder things.
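Put concretely, the note argues for always pairing the hardware fence with an explicit compiler barrier rather than relying on the intrinsic to imply one. A hedged sketch of that pairing, assuming an x86/x64 MSVC target purely for concreteness (this is not the actual EASTL macro):

#include <intrin.h>

inline void full_fence_sketch()
{
    _ReadWriteBarrier(); // compiler-only barrier: forbid MSVC from reordering memory accesses across this point
    _mm_mfence();        // hardware barrier: orders prior loads/stores against later ones at the CPU level
    _ReadWriteBarrier();
}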
24 changes: 16 additions & 8 deletions include/EASTL/internal/atomic/arch/x86/arch_x86.h
@@ -54,11 +54,14 @@

/**
* NOTE:
*
* On 32-bit x86 CPUs Intel Pentium and newer, AMD K5 and newer
* and any i686 class of x86 CPUs support only 64-bit cmpxchg
* and any i586 class of x86 CPUs support only 64-bit cmpxchg
* known as cmpxchg8b.
* On these class of cpus we can guarantee that 64-bit loads are
* also atomic by using the SSE1/SSE2 movq instructions.
*
* On these class of cpus we can guarantee that 64-bit loads/stores are
* also atomic by using the SSE2 movq, SSE1 movlps, or x87 fild/fstp instructions.
*
* We support all other atomic operations
* on compilers that only provide this 64-bit cmpxchg instruction
* by wrapping them around the 64-bit cmpxchg8b instruction.
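To illustrate the movq point above (a sketch, not EASTL code): on 32-bit x86 an aligned 64-bit load can be issued as a single SSE2 instruction, which is what makes it atomic, whereas a pair of 32-bit loads could tear.

#include <emmintrin.h> // SSE2
#include <stdint.h>

// Assumes 'ptr' is 8-byte aligned and the CPU supports SSE2.
inline int64_t load64_via_movq(const int64_t* ptr)
{
    __m128i v = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr)); // movq: one 64-bit load
    int64_t result;
    _mm_storel_epi64(reinterpret_cast<__m128i*>(&result), v);           // movq: one 64-bit store into the local
    return result;
}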
@@ -91,21 +94,26 @@

/**
* NOTE:
*
* 64-bit x64 CPUs support only 128-bit cmpxchg known as cmpxchg16b.
*
* We support all other atomic operations by wrapping them around
* the 128-bit cmpxchg16b instruction.
* 128-bit loads are only atomic if using cmpxchg16b on x64.
*
* 128-bit loads are only atomic by using the cmpxchg16b instruction.
* SSE 128-bit loads are not guaranteed to be atomic even though some CPUs
* make them atomic such as AMD Ryzen or Intel SandyBridge.
*/
#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))


#define EASTL_ARCH_ATOMIC_X64_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
static_assert(false, "EASTL_ARCH_ATOMIC_X64_NOP_PRE_COMPUTE_DESIRED() must be implmented!");
#define EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
static_assert(false, "EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED() must be implmented!");

#define EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET(ret, prevObserved, val)
#define EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET(ret, prevObserved, val)


#define EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
#define EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
{ \
bool cmpxchgRet; \
/* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \
34 changes: 17 additions & 17 deletions include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h
@@ -57,37 +57,37 @@
#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))


#define EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
#define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
ret = ((observed) + (val))

#define EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
#define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
ret = ((prevObserved) + (val))


#define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \
EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET)
EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)

#define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \
EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET)
EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)

#define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \
EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET)
EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)

#define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \
EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET)
EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)

#define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \
EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET)
EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)


#endif
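The PRE_COMPUTE_DESIRED / POST_COMPUTE_RET hooks above slot into a single compare-exchange retry loop. A generic sketch of that pattern (placeholder names, using std::atomic purely for illustration, not the EASTL macros):

#include <atomic>

template <class T, class ComputeDesired, class ComputeRet>
T rmw_via_cas(std::atomic<T>& obj, T val, ComputeDesired computeDesired, ComputeRet computeRet)
{
    T observed = obj.load(std::memory_order_relaxed); // may be stale; the loop below fixes it up
    T desired;
    do
    {
        desired = computeDesired(observed, val);      // e.g. observed + val for add_fetch
    } while (!obj.compare_exchange_strong(observed, desired));
    return computeRet(observed, val);                 // add_fetch returns prevObserved + val; fetch_add would return prevObserved
}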
34 changes: 17 additions & 17 deletions include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h
@@ -57,37 +57,37 @@
#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))


#define EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
#define EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
ret = ((observed) & (val))

#define EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
#define EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
ret = ((prevObserved) & (val))


#define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \
EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET)
EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)

#define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \
EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET)
EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)

#define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \
EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET)
EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)

#define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \
EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET)
EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)

#define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \
EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET)
EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)


#endif
26 changes: 13 additions & 13 deletions include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h
@@ -18,49 +18,49 @@
#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))


#define EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) \
#define EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) \
{ \
/* Compare RDX:RAX with m128. If equal, set ZF and load RCX:RBX into m128. Else, clear ZF and load m128 into RDX:RAX. */ \
__asm__ __volatile__ ("lock; cmpxchg16b %2\n" /* cmpxchg16b sets/clears ZF */ \
"sete %3" /* If ZF == 1, set the return value to 1 */ \
/* Output Operands */ \
: "=a"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))), "=d"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)) + 1)), \
: "=a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[0]), "=d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[1]), \
"+m"(*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__uint128_t, (ptr)))), \
"=rm"((ret)) \
/* Input Operands */ \
: "b"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))), "c"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)) + 1)), \
"a"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))), "d"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)) + 1)) \
: "b"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))[0]), "c"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))[1]), \
"a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[0]), "d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[1]) \
/* Clobbers */ \
: "memory", "cc"); \
}


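For readability, here is the same lock cmpxchg16b / sete sequence written as a standalone function rather than through the casting macros (a sketch assuming GCC or Clang on x86-64 and a 16-byte-aligned destination; names are illustrative):

#include <stdint.h>

struct alignas(16) uint128_pair { uint64_t lo, hi; };

inline bool cmpxchg16b_strong(volatile uint128_pair* dst, uint128_pair* expected, uint128_pair desired)
{
    bool success;
    // Compare RDX:RAX with *dst. If equal, store RCX:RBX into *dst and set ZF;
    // otherwise load *dst into RDX:RAX (updating *expected) and clear ZF. sete captures ZF.
    __asm__ __volatile__("lock; cmpxchg16b %[dst]\n"
                         "sete %[ok]"
                         : [ok] "=rm"(success),
                           "+a"(expected->lo), "+d"(expected->hi),
                           [dst] "+m"(*dst)
                         : "b"(desired.lo), "c"(desired.hi)
                         : "memory", "cc");
    return success;
}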
#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)

#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)

#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)

#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)

#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)

#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)

#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)

#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)

#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)


#endif