From 423527757d3e60d146b3887ca334e1c9924d8c10 Mon Sep 17 00:00:00 2001 From: Max Winkler Date: Fri, 9 Oct 2020 17:55:50 -0700 Subject: [PATCH] Oss/next release (#391) Ensure the alignment of a node is the alignment of the whole node allocation, not just the user type Removing old compiler special case code for EA_CPP14_CONSTEXPR Adding eastl::string hash tests and removing an addition overload in the helper template that contrains usage to enum types. Fixing user reported regression when attempting to use fancy pointers in a tuple Resolving uint128_t hashing compiler errors from properly limiting the generic template for enums eastl::pair adding C++17 structured bindings unpacking support eastl::atomic implementation - See EASTL/atomic.h for documentation eastl::function - Optimized function call operator Consolidate Warnings by using EA_DISABLE_WARNING macros Reverting the UDL warning suppression because of push/pop mismatch issues with the EABase warning suppression macros eastl::variant - Fixed variant warnings due to not sfinae overloads that are not the same type but still comparable - improved code gen on msvc -added tests Removed unndeded allocator_traits headers Added comments on reverse_wrapper Removed sparse_matrix.h as it was removed internally quite a while ago Updated files that had slight differences to internal eastl Update travis CI to use g++-9 && clang++-11 Updated README and CONTRIBUTING to allow contributors to submit their info under the contributors section --- .travis.yml | 21 +- CONTRIBUTING.md | 7 + README.md | 10 +- include/EASTL/algorithm.h | 13 +- include/EASTL/atomic.h | 1736 ++++++ include/EASTL/bitset.h | 42 +- include/EASTL/bitvector.h | 22 +- include/EASTL/bonus/adaptors.h | 52 +- include/EASTL/bonus/sparse_matrix.h | 1581 ------ include/EASTL/chrono.h | 13 +- include/EASTL/deque.h | 37 +- include/EASTL/fixed_allocator.h | 25 +- include/EASTL/functional.h | 26 +- include/EASTL/internal/allocator_traits.h | 347 -- .../internal/allocator_traits_fwd_decls.h | 40 - include/EASTL/internal/atomic/arch/arch.h | 65 + .../internal/atomic/arch/arch_add_fetch.h | 173 + .../internal/atomic/arch/arch_and_fetch.h | 173 + .../atomic/arch/arch_cmpxchg_strong.h | 430 ++ .../internal/atomic/arch/arch_cmpxchg_weak.h | 430 ++ .../atomic/arch/arch_compiler_barrier.h | 19 + .../internal/atomic/arch/arch_cpu_pause.h | 25 + .../internal/atomic/arch/arch_exchange.h | 173 + .../internal/atomic/arch/arch_fetch_add.h | 173 + .../internal/atomic/arch/arch_fetch_and.h | 173 + .../internal/atomic/arch/arch_fetch_or.h | 173 + .../internal/atomic/arch/arch_fetch_sub.h | 173 + .../internal/atomic/arch/arch_fetch_xor.h | 173 + .../EASTL/internal/atomic/arch/arch_load.h | 125 + .../atomic/arch/arch_memory_barrier.h | 47 + .../internal/atomic/arch/arch_or_fetch.h | 173 + .../internal/atomic/arch/arch_signal_fence.h | 21 + .../EASTL/internal/atomic/arch/arch_store.h | 113 + .../internal/atomic/arch/arch_sub_fetch.h | 173 + .../internal/atomic/arch/arch_thread_fence.h | 49 + .../internal/atomic/arch/arch_xor_fetch.h | 173 + .../EASTL/internal/atomic/arch/arm/arch_arm.h | 89 + .../internal/atomic/arch/arm/arch_arm_load.h | 164 + .../atomic/arch/arm/arch_arm_memory_barrier.h | 96 + .../internal/atomic/arch/arm/arch_arm_store.h | 142 + .../atomic/arch/arm/arch_arm_thread_fence.h | 37 + .../EASTL/internal/atomic/arch/x86/arch_x86.h | 159 + .../atomic/arch/x86/arch_x86_add_fetch.h | 96 + .../atomic/arch/x86/arch_x86_and_fetch.h | 96 + .../atomic/arch/x86/arch_x86_cmpxchg_strong.h | 69 + 
.../atomic/arch/x86/arch_x86_cmpxchg_weak.h | 52 + .../atomic/arch/x86/arch_x86_exchange.h | 91 + .../atomic/arch/x86/arch_x86_fetch_add.h | 90 + .../atomic/arch/x86/arch_x86_fetch_and.h | 90 + .../atomic/arch/x86/arch_x86_fetch_or.h | 90 + .../atomic/arch/x86/arch_x86_fetch_sub.h | 90 + .../atomic/arch/x86/arch_x86_fetch_xor.h | 90 + .../internal/atomic/arch/x86/arch_x86_load.h | 159 + .../atomic/arch/x86/arch_x86_memory_barrier.h | 102 + .../atomic/arch/x86/arch_x86_or_fetch.h | 96 + .../internal/atomic/arch/x86/arch_x86_store.h | 171 + .../atomic/arch/x86/arch_x86_sub_fetch.h | 96 + .../atomic/arch/x86/arch_x86_thread_fence.h | 42 + .../atomic/arch/x86/arch_x86_xor_fetch.h | 96 + include/EASTL/internal/atomic/atomic.h | 250 + .../EASTL/internal/atomic/atomic_asserts.h | 70 + .../EASTL/internal/atomic/atomic_base_width.h | 315 ++ include/EASTL/internal/atomic/atomic_casts.h | 171 + include/EASTL/internal/atomic/atomic_flag.h | 170 + .../internal/atomic/atomic_flag_standalone.h | 69 + .../EASTL/internal/atomic/atomic_integral.h | 343 ++ include/EASTL/internal/atomic/atomic_macros.h | 61 + .../atomic/atomic_macros/atomic_macros.h | 87 + .../atomic_macros/atomic_macros_add_fetch.h | 98 + .../atomic_macros/atomic_macros_and_fetch.h | 98 + .../atomic/atomic_macros/atomic_macros_base.h | 65 + .../atomic_macros_cmpxchg_strong.h | 245 + .../atomic_macros_cmpxchg_weak.h | 245 + .../atomic_macros_compiler_barrier.h | 30 + .../atomic_macros/atomic_macros_cpu_pause.h | 22 + .../atomic_macros/atomic_macros_exchange.h | 98 + .../atomic_macros/atomic_macros_fetch_add.h | 98 + .../atomic_macros/atomic_macros_fetch_and.h | 98 + .../atomic_macros/atomic_macros_fetch_or.h | 98 + .../atomic_macros/atomic_macros_fetch_sub.h | 98 + .../atomic_macros/atomic_macros_fetch_xor.h | 98 + .../atomic/atomic_macros/atomic_macros_load.h | 75 + .../atomic_macros_memory_barrier.h | 38 + .../atomic_macros/atomic_macros_or_fetch.h | 97 + .../atomic_macros_signal_fence.h | 34 + .../atomic_macros/atomic_macros_store.h | 68 + .../atomic_macros/atomic_macros_sub_fetch.h | 98 + .../atomic_macros_thread_fence.h | 34 + .../atomic_macros/atomic_macros_xor_fetch.h | 98 + .../internal/atomic/atomic_memory_order.h | 44 + .../EASTL/internal/atomic/atomic_pointer.h | 277 + .../atomic/atomic_pop_compiler_options.h | 24 +- .../atomic/atomic_push_compiler_options.h | 17 + .../internal/atomic/atomic_size_aligned.h | 199 + .../EASTL/internal/atomic/atomic_standalone.h | 478 ++ .../EASTL/internal/atomic/compiler/compiler.h | 116 + .../atomic/compiler/compiler_add_fetch.h | 173 + .../atomic/compiler/compiler_and_fetch.h | 173 + .../atomic/compiler/compiler_barrier.h | 36 + .../atomic/compiler/compiler_cmpxchg_strong.h | 430 ++ .../atomic/compiler/compiler_cmpxchg_weak.h | 430 ++ .../atomic/compiler/compiler_cpu_pause.h | 32 + .../atomic/compiler/compiler_exchange.h | 173 + .../atomic/compiler/compiler_fetch_add.h | 173 + .../atomic/compiler/compiler_fetch_and.h | 173 + .../atomic/compiler/compiler_fetch_or.h | 173 + .../atomic/compiler/compiler_fetch_sub.h | 173 + .../atomic/compiler/compiler_fetch_xor.h | 173 + .../internal/atomic/compiler/compiler_load.h | 131 + .../atomic/compiler/compiler_memory_barrier.h | 47 + .../atomic/compiler/compiler_or_fetch.h | 173 + .../atomic/compiler/compiler_signal_fence.h | 49 + .../internal/atomic/compiler/compiler_store.h | 113 + .../atomic/compiler/compiler_sub_fetch.h | 173 + .../atomic/compiler/compiler_thread_fence.h | 49 + .../atomic/compiler/compiler_xor_fetch.h | 173 + 
.../atomic/compiler/gcc/compiler_gcc.h | 139 + .../compiler/gcc/compiler_gcc_add_fetch.h | 118 + .../compiler/gcc/compiler_gcc_and_fetch.h | 118 + .../compiler/gcc/compiler_gcc_barrier.h | 31 + .../gcc/compiler_gcc_cmpxchg_strong.h | 182 + .../compiler/gcc/compiler_gcc_cmpxchg_weak.h | 182 + .../compiler/gcc/compiler_gcc_cpu_pause.h | 31 + .../compiler/gcc/compiler_gcc_exchange.h | 118 + .../compiler/gcc/compiler_gcc_fetch_add.h | 118 + .../compiler/gcc/compiler_gcc_fetch_and.h | 118 + .../compiler/gcc/compiler_gcc_fetch_or.h | 118 + .../compiler/gcc/compiler_gcc_fetch_sub.h | 118 + .../compiler/gcc/compiler_gcc_fetch_xor.h | 118 + .../atomic/compiler/gcc/compiler_gcc_load.h | 90 + .../compiler/gcc/compiler_gcc_or_fetch.h | 118 + .../compiler/gcc/compiler_gcc_signal_fence.h | 38 + .../atomic/compiler/gcc/compiler_gcc_store.h | 89 + .../compiler/gcc/compiler_gcc_sub_fetch.h | 118 + .../compiler/gcc/compiler_gcc_thread_fence.h | 38 + .../compiler/gcc/compiler_gcc_xor_fetch.h | 118 + .../atomic/compiler/msvc/compiler_msvc.h | 223 + .../compiler/msvc/compiler_msvc_add_fetch.h | 104 + .../compiler/msvc/compiler_msvc_and_fetch.h | 104 + .../compiler/msvc/compiler_msvc_barrier.h | 31 + .../msvc/compiler_msvc_cmpxchg_strong.h | 178 + .../msvc/compiler_msvc_cmpxchg_weak.h | 162 + .../compiler/msvc/compiler_msvc_cpu_pause.h | 27 + .../compiler/msvc/compiler_msvc_exchange.h | 125 + .../compiler/msvc/compiler_msvc_fetch_add.h | 101 + .../compiler/msvc/compiler_msvc_fetch_and.h | 101 + .../compiler/msvc/compiler_msvc_fetch_or.h | 101 + .../compiler/msvc/compiler_msvc_fetch_sub.h | 104 + .../compiler/msvc/compiler_msvc_fetch_xor.h | 101 + .../compiler/msvc/compiler_msvc_or_fetch.h | 104 + .../msvc/compiler_msvc_signal_fence.h | 34 + .../compiler/msvc/compiler_msvc_sub_fetch.h | 107 + .../compiler/msvc/compiler_msvc_xor_fetch.h | 104 + include/EASTL/internal/config.h | 37 +- include/EASTL/internal/fixed_pool.h | 22 +- include/EASTL/internal/function_detail.h | 123 +- include/EASTL/internal/generic_iterator.h | 29 +- include/EASTL/internal/hashtable.h | 23 +- include/EASTL/internal/intrusive_hashtable.h | 26 +- include/EASTL/internal/red_black_tree.h | 29 +- include/EASTL/internal/smart_ptr.h | 6 +- include/EASTL/internal/thread_support.h | 23 +- include/EASTL/internal/type_fundamental.h | 4 + include/EASTL/iterator.h | 38 +- include/EASTL/list.h | 42 +- include/EASTL/memory.h | 45 +- include/EASTL/numeric_limits.h | 3 +- include/EASTL/priority_queue.h | 26 +- include/EASTL/shared_array.h | 11 +- include/EASTL/shared_ptr.h | 16 +- include/EASTL/slist.h | 49 +- include/EASTL/string.h | 51 +- include/EASTL/unique_ptr.h | 14 +- include/EASTL/utility.h | 53 +- include/EASTL/variant.h | 113 +- include/EASTL/vector.h | 33 +- source/assert.cpp | 6 +- source/atomic.cpp | 25 + source/hashtable.cpp | 28 +- source/thread_support.cpp | 4 +- test/source/EASTLTest.h | 4 +- test/source/TestAtomicAsm.cpp | 4863 +++++++++++++++++ test/source/TestAtomicBasic.cpp | 3826 +++++++++++++ test/source/TestExtra.cpp | 14 + test/source/TestHash.cpp | 32 +- test/source/TestSmartPtr.cpp | 102 - test/source/TestString.cpp | 11 +- test/source/TestString.inl | 26 + test/source/TestStringView.inl | 1 - test/source/TestTuple.cpp | 19 + test/source/TestUtility.cpp | 40 + test/source/TestVariant.cpp | 2 + test/source/TestVariant2.cpp | 82 + test/source/main.cpp | 2 + 194 files changed, 28453 insertions(+), 2725 deletions(-) create mode 100644 include/EASTL/atomic.h delete mode 100644 include/EASTL/bonus/sparse_matrix.h delete mode 100644 
include/EASTL/internal/allocator_traits.h delete mode 100644 include/EASTL/internal/allocator_traits_fwd_decls.h create mode 100644 include/EASTL/internal/atomic/arch/arch.h create mode 100644 include/EASTL/internal/atomic/arch/arch_add_fetch.h create mode 100644 include/EASTL/internal/atomic/arch/arch_and_fetch.h create mode 100644 include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h create mode 100644 include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h create mode 100644 include/EASTL/internal/atomic/arch/arch_compiler_barrier.h create mode 100644 include/EASTL/internal/atomic/arch/arch_cpu_pause.h create mode 100644 include/EASTL/internal/atomic/arch/arch_exchange.h create mode 100644 include/EASTL/internal/atomic/arch/arch_fetch_add.h create mode 100644 include/EASTL/internal/atomic/arch/arch_fetch_and.h create mode 100644 include/EASTL/internal/atomic/arch/arch_fetch_or.h create mode 100644 include/EASTL/internal/atomic/arch/arch_fetch_sub.h create mode 100644 include/EASTL/internal/atomic/arch/arch_fetch_xor.h create mode 100644 include/EASTL/internal/atomic/arch/arch_load.h create mode 100644 include/EASTL/internal/atomic/arch/arch_memory_barrier.h create mode 100644 include/EASTL/internal/atomic/arch/arch_or_fetch.h create mode 100644 include/EASTL/internal/atomic/arch/arch_signal_fence.h create mode 100644 include/EASTL/internal/atomic/arch/arch_store.h create mode 100644 include/EASTL/internal/atomic/arch/arch_sub_fetch.h create mode 100644 include/EASTL/internal/atomic/arch/arch_thread_fence.h create mode 100644 include/EASTL/internal/atomic/arch/arch_xor_fetch.h create mode 100644 include/EASTL/internal/atomic/arch/arm/arch_arm.h create mode 100644 include/EASTL/internal/atomic/arch/arm/arch_arm_load.h create mode 100644 include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h create mode 100644 include/EASTL/internal/atomic/arch/arm/arch_arm_store.h create mode 100644 include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_load.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_store.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h create mode 100644 include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h create mode 100644 include/EASTL/internal/atomic/atomic.h create mode 100644 include/EASTL/internal/atomic/atomic_asserts.h create mode 100644 
include/EASTL/internal/atomic/atomic_base_width.h create mode 100644 include/EASTL/internal/atomic/atomic_casts.h create mode 100644 include/EASTL/internal/atomic/atomic_flag.h create mode 100644 include/EASTL/internal/atomic/atomic_flag_standalone.h create mode 100644 include/EASTL/internal/atomic/atomic_integral.h create mode 100644 include/EASTL/internal/atomic/atomic_macros.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h create mode 100644 include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h create mode 100644 include/EASTL/internal/atomic/atomic_memory_order.h create mode 100644 include/EASTL/internal/atomic/atomic_pointer.h rename test/source/TestSparseMatrix.cpp => include/EASTL/internal/atomic/atomic_pop_compiler_options.h (60%) create mode 100644 include/EASTL/internal/atomic/atomic_push_compiler_options.h create mode 100644 include/EASTL/internal/atomic/atomic_size_aligned.h create mode 100644 include/EASTL/internal/atomic/atomic_standalone.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_add_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_and_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_barrier.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_exchange.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_fetch_add.h create mode 100644 
include/EASTL/internal/atomic/compiler/compiler_fetch_and.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_fetch_or.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_load.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_or_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_signal_fence.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_store.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_thread_fence.h create mode 100644 include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h create mode 100644 include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h create 
mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h create mode 100644 include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h create mode 100644 source/atomic.cpp create mode 100644 test/source/TestAtomicAsm.cpp create mode 100644 test/source/TestAtomicBasic.cpp create mode 100644 test/source/TestVariant2.cpp diff --git a/.travis.yml b/.travis.yml index c451af6a..11b4a229 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,4 @@ +dist: xenial language: cpp cache: @@ -19,15 +20,17 @@ env: addons: apt: + update: true sources: - - ubuntu-toolchain-r-test - george-edison55-precise-backports - - llvm-toolchain-trusty-7 + - sourceline: 'ppa:ubuntu-toolchain-r/test' + - sourceline: 'deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-11 main' + key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key' packages: - cmake - cmake-data - - g++-7 - - clang-7 + - g++-9 + - clang-11 matrix: include: @@ -56,10 +59,12 @@ git: before_install: - git submodule update --init - - if [[ "$CXX" == "g++" ]]; then export CC="gcc-7" ;fi - - if [[ "$CXX" == "g++" ]]; then export CXX="g++-7" ;fi - - if [[ "$CXX" == "clang++" && "${TRAVIS_OS_NAME}" != "osx" ]]; then export CC="clang-7" ;fi - - if [[ "$CXX" == "clang++" && "${TRAVIS_OS_NAME}" != "osx" ]]; then export CXX="clang++-7" ;fi + - if [[ "$CXX" == "g++" ]]; then export CC="gcc-9" ;fi + - if [[ "$CXX" == "g++" ]]; then export CXX="g++-9" ;fi + - if [[ "$CXX" == "clang++" && "${TRAVIS_OS_NAME}" != "osx" ]]; then export CC="clang-11" ;fi + - if [[ "$CXX" == "clang++" && "${TRAVIS_OS_NAME}" != "osx" ]]; then export CXX="clang++-11" ;fi + - if [[ "$CXX" == "g++-9" && "${TRAVIS_OS_NAME}" != "windows" ]]; then g++-9 -v ;fi + - if [[ "$CXX" == "clang++-11" && "${TRAVIS_OS_NAME}" != "windows" ]]; then clang++-11 -v ;fi install: # MOJI check; exit 1 if non-ascii characters detected in C++ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2ec4df49..036520e4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,6 +3,13 @@ Before you can contribute, EA must have a Contributor License Agreement (CLA) on file that has been signed by each contributor. You can sign here: [Go to CLA](https://electronicarts.na1.echosign.com/public/esignWidget?wid=CBFCIBAA3AAABLblqZhByHRvZqmltGtliuExmuV-WNzlaJGPhbSRg2ufuPsM3P0QmILZjLpkGslg24-UJtek*) +If you want to be recognized for your contributions to EASTL or have a project using EASTL be recognized; you can submit a pull request to the appropriate sections in [README.md](README.md). +Some examples of what the format and information will look like is as follows. +* John Smith - jsmith@domain.com +* John Smith +* Frostbite - Electronic Arts +* My Project - [link to said project] + ### Pull Request Policy All code contributions to EASTL are submitted as [Github pull requests](https://help.github.com/articles/using-pull-requests/). All pull requests will be reviewed by an EASTL maintainer according to the guidelines found in the next section. 
diff --git a/README.md b/README.md
index da9ea480..7cc400ab 100644
--- a/README.md
+++ b/README.md
@@ -40,11 +40,12 @@ Please see [EASTL Introduction](doc/Introduction.md).
 
 Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on compiling and testing the source.
 
-## Credits
+## Credits And Maintainers
 
 EASTL was created by Paul Pedriana and he maintained the project for roughly 10 years.
 
-Roberto Parolin is the current EASTL owner within EA and is responsible for the open source repository.
+Roberto Parolin is the current EASTL owner and primary maintainer within EA and is responsible for the open source repository.
+Max Winkler is the secondary maintainer for EASTL within EA and on the open source repository.
 
 Significant EASTL contributions were made by (in alphabetical order):
 
@@ -58,6 +59,11 @@ Significant EASTL contributions were made by (in alphabetical order):
 * Roberto Parolin
 * Simon Everett
 
+## Contributors
+
+## Projects And Products Using EASTL
+
+* Frostbite - Electronic Arts - [https://www.ea.com/frostbite]
 
 ## License
diff --git a/include/EASTL/algorithm.h b/include/EASTL/algorithm.h
index 0e0522a9..ff2bf8bb 100644
--- a/include/EASTL/algorithm.h
+++ b/include/EASTL/algorithm.h
@@ -243,17 +243,16 @@
 #include
 #include
-#ifdef _MSC_VER
-	#pragma warning(push, 0)
-	#if defined(EA_COMPILER_MICROSOFT) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+EA_DISABLE_ALL_VC_WARNINGS();
+
+	#if defined(EA_COMPILER_MSVC) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
 		#include
 	#endif
-#endif
+
 #include
 #include  // memcpy, memcmp, memmove
-#ifdef _MSC_VER
-	#pragma warning(pop)
-#endif
+
+EA_RESTORE_ALL_VC_WARNINGS();
 
 #if defined(EA_PRAGMA_ONCE_SUPPORTED)
 	#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
diff --git a/include/EASTL/atomic.h b/include/EASTL/atomic.h
new file mode 100644
index 00000000..5072a166
--- /dev/null
+++ b/include/EASTL/atomic.h
@@ -0,0 +1,1736 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_H
+#define EASTL_ATOMIC_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Below is the documentation of the API of the eastl::atomic library.
+// This includes class and free functions.
+// Anything marked with a '+' in front of the name is an extension to the std API.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// eastl::atomic memory_order API
+//
+// See below for full explanations on the memory orders and their guarantees.
+//
+// - eastl::memory_order_relaxed
+// - eastl::memory_order_acquire
+// - eastl::memory_order_release
+// - eastl::memory_order_acq_rel
+// - eastl::memory_order_seq_cst
+// - +eastl::memory_order_read_depends
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// eastl::atomic class API
+//
+// All jargon and prerequisite knowledge is explained below.
+//
+// Unless otherwise specified, all orders except read_depends are valid
+// on the given operation.
+// Unless otherwise specified all operations are valid on all types T.
+// If no order is provided, seq_cst memory ordering is used for the operation.
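+//
+// As a quick orientation before the detailed listing below, here is a minimal usage sketch.
+// It is added for illustration only (the variable names are arbitrary) and relies solely on
+// the operations documented in this section:
+//
+//     #include <EASTL/atomic.h>
+//
+//     eastl::atomic<int> counter{};                              // value-initialized to 0
+//
+//     counter.store(1, eastl::memory_order_release);             // explicit memory order
+//     int observed = counter.load(eastl::memory_order_acquire);  // explicit memory order
+//     int previous = counter.fetch_add(1);                       // no order given, so seq_cst is used
+//     int updated  = counter.add_fetch(1);                       // EASTL extension, returns the new value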
+//
+// - atomic() : Value-initializes the underlying object as T{}.
+//
+// - atomic(T) : Initializes the underlying object with a copy of T.
+//
+// - T operator=(T) : Atomically assigns T as store(T, seq_cst).
+//
+// - is_lock_free() : true if the operations are lock-free. Always true for eastl.
+//
+// - store(T, order) : Atomically stores T affecting memory according to order.
+//                   : Valid orders are relaxed, release, and seq_cst.
+//
+// - T load(order) : Atomically loads T affecting memory according to order.
+//                 : Valid orders are relaxed, acquire, and seq_cst.
+//                 : If T is a pointer type, read_depends is another valid order.
+//
+// - operator T() : Atomically loads T as load(T, seq_cst).
+//
+// - T exchange(T, order) : Atomically performs an RMW that replaces the current value with T.
+//                        : Memory is affected according to order.
+//                        : Returns the previous value stored before the RMW operation.
+//
+// - bool compare_exchange_weak(T&, T, successOrder, failOrder)
+//     : Atomically compares the value stored with that of T& and, if equal, replaces it with T.
+//     : This is an RMW operation.
+//     : If the comparison fails, loads the observed value into T&. This is a load operation.
+//     : Memory is affected in the RMW operation according to successOrder.
+//     : Memory is affected in the load operation according to failOrder.
+//     : failOrder cannot be a stronger order than successOrder.
+//     : Returns true if the comparison succeeded and T was stored into the atomic object; false otherwise.
+//     :
+//     : The weak variant may fail even if the observed value of the atomic object equals T&.
+//     : This can yield performance gains on platforms with ld/str exclusive pair instructions, especially
+//     : when the compare_exchange operation is done in a loop.
+//     : Only the bool return value can be used to determine if the operation was successful.
+//     : (A typical retry-loop sketch is shown at the end of this class API section.)
+//
+// - bool compare_exchange_weak(T&, T, order)
+//     : Same as the above except that order is used for both the RMW and the load operation.
+//     : If order == acq_rel then the order of the load operation equals acquire.
+//     : If order == release then the order of the load operation equals relaxed.
+//
+// - bool compare_exchange_strong(T&, T, successOrder, failOrder)
+// - bool compare_exchange_strong(T&, T, order)
+//     : This operation is the same as the above weak variants
+//     : except that it will not fail spuriously if the value stored equals T&.
+//
+// The below operations are only valid for Integral types.
+//
+// - T fetch_add(T, order)
+//     : Atomically performs an RMW that increments the value stored with T.
+//     : Returns the previous value stored before the RMW operation.
+// - T fetch_sub(T, order)
+//     : Atomically performs an RMW that decrements the value stored with T.
+//     : Returns the previous value stored before the RMW operation.
+// - T fetch_and(T, order)
+//     : Atomically performs an RMW that bit-wise and's the value stored with T.
+//     : Returns the previous value stored before the RMW operation.
+// - T fetch_or(T, order)
+//     : Atomically performs an RMW that bit-wise or's the value stored with T.
+//     : Returns the previous value stored before the RMW operation.
+// - T fetch_xor(T, order)
+//     : Atomically performs an RMW that bit-wise xor's the value stored with T.
+//     : Returns the previous value stored before the RMW operation.
+//
+// - +T add_fetch(T, order)
+//     : Atomically performs an RMW that increments the value stored with T.
+//     : Returns the new updated value after the operation.
+// - +T sub_fetch(T, order)
+//     : Atomically performs an RMW that decrements the value stored with T.
+//     : Returns the new updated value after the operation.
+// - +T and_fetch(T, order)
+//     : Atomically performs an RMW that bit-wise and's the value stored with T.
+//     : Returns the new updated value after the operation.
+// - +T or_fetch(T, order)
+//     : Atomically performs an RMW that bit-wise or's the value stored with T.
+//     : Returns the new updated value after the operation.
+// - +T xor_fetch(T, order)
+//     : Atomically performs an RMW that bit-wise xor's the value stored with T.
+//     : Returns the new updated value after the operation.
+//
+// - T operator++/--()
+//     : Atomically increments or decrements the atomic value by one.
+//     : Returns the previous value stored before the RMW operation.
+//     : Memory is affected according to seq_cst ordering.
+//
+// - T ++/--operator()
+//     : Atomically increments or decrements the atomic value by one.
+//     : Returns the new updated value after the RMW operation.
+//     : Memory is affected according to seq_cst ordering.
+//
+// - T operator+=/-=/&=/|=/^=(T)
+//     : Atomically adds, subtracts, bitwise and/or/xor the atomic object with T.
+//     : Returns the new updated value after the operation.
+//     : Memory is affected according to seq_cst ordering.
+//
+//
+// The below operations are only valid for Pointer types
+//
+// - T* fetch_add(ptrdiff_t val, order)
+//     : Atomically performs an RMW that increments the value stored with sizeof(T) * val
+//     : Returns the previous value stored before the RMW operation.
+// - T* fetch_sub(ptrdiff_t val, order)
+//     : Atomically performs an RMW that decrements the value stored with sizeof(T) * val
+//     : Returns the previous value stored before the RMW operation.
+//
+// - +T* add_fetch(ptrdiff_t val, order)
+//     : Atomically performs an RMW that increments the value stored with sizeof(T) * val
+//     : Returns the new updated value after the operation.
+// - +T* sub_fetch(ptrdiff_t val, order)
+//     : Atomically performs an RMW that decrements the value stored with sizeof(T) * val
+//     : Returns the new updated value after the operation.
+//
+// - T* operator++/--()
+//     : Atomically increments or decrements the atomic value by sizeof(T) * 1.
+//     : Returns the previous value stored before the RMW operation.
+//     : Memory is affected according to seq_cst ordering.
+//
+// - T* ++/--operator()
+//     : Atomically increments or decrements the atomic value by sizeof(T) * 1.
+//     : Returns the new updated value after the RMW operation.
+//     : Memory is affected according to seq_cst ordering.
+//
+//
+// - +EASTL_ATOMIC_HAS_[len]BIT Macro Definitions
+//     These macros provide the ability to compile-time switch on the availability of support for the specific
+//     bit width of an atomic object.
+//     Example:
+//
+//     #if defined(EASTL_ATOMIC_HAS_128BIT)
+//     #endif
+//
+//     Indicates the support for 128-bit atomic operations on an eastl::atomic object.
+//
+
+
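+// A typical retry loop around the compare_exchange_weak operation documented above. This is an
+// added, illustrative sketch (the atomic object and the doubling transform are arbitrary); on
+// failure the observed value is reloaded into 'expected', so the loop simply retries:
+//
+//     eastl::atomic<int> value{1};
+//
+//     int expected = value.load(eastl::memory_order_relaxed);
+//     while (!value.compare_exchange_weak(expected, expected * 2,
+//                                         eastl::memory_order_release,
+//                                         eastl::memory_order_relaxed))
+//     {
+//         // 'expected' now holds the freshly observed value; compute the new value and retry.
+//     }
+//
+
+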
+/////////////////////////////////////////////////////////////////////////////////
+//
+// eastl::atomic_flag class API
+//
+// Unless otherwise specified, all orders except read_depends are valid
+// on the given operation.
+//
+// - atomic_flag() : Initializes the flag to false.
+//
+// - clear(order)
+//     : Atomically stores the value false to the flag.
+//     : Valid orders are relaxed, release, and seq_cst.
+//
+// - bool test_and_set(order)
+//     : Atomically exchanges the flag with true and returns the previous value that was held.
+//
+// - bool test(order)
+//     : Atomically loads the flag value.
+//     : Valid orders are relaxed, acquire, and seq_cst.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// eastl::atomic standalone free function API
+//
+// All class methods have a standalone free function that takes a pointer to the
+// atomic object as the first argument. These functions just call the correct method
+// on the atomic object for the given operation.
+// These functions come in two variants, a non-explicit and an explicit variant
+// that take on the form atomic_op() and atomic_op_explicit() respectively.
+// The non-explicit variants take no order arguments and thus are all seq_cst.
+// The explicit variants take an order argument.
+// Only the standalone functions that do not have a class method equivalent are
+// documented here, which includes all new extensions to the std API.
+//
+// - +compiler_barrier()
+//     : Read-Write Compiler Barrier.
+// - +compiler_barrier_data_dependency(const T&)
+//     : Read-Write Compiler Barrier.
+//     : Applies a fake input dependency on const T& so the compiler believes said variable is used.
+//     : Useful for example when writing benchmark or testing code with local variables that must not get dead-store eliminated.
+// - +cpu_pause()
+//     : Prevents speculative memory order violations in spin-wait loops.
+//     : Allows giving up core resources, execution units, to other threads while in spin-wait loops.
+// - atomic_thread_fence(order)
+//     : Read docs below.
+// - atomic_signal_fence(order)
+//     : Prevents reordering with a signal handler.
+// - +atomic_load_cond(const eastl::atomic*, Predicate)
+//     : Continuously loads the atomic object until Predicate is true.
+//     : Will properly ensure the spin-wait loop is optimal.
+//     : Very useful when needing to spin-wait for some condition to be true, which is common in many lock-free algorithms.
+//     : Memory is affected according to seq_cst ordering.
+// - +atomic_load_cond_explicit(const eastl::atomic*, Predicate, Order)
+//     : Same as above but takes an order for how memory is affected.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Deviations from the standard. This does not include new features added:
+//
+// 1.
+// Description: Atomic class constructors are not and will not be constexpr.
+// Reasoning  : We assert in the constructor that the this pointer is properly aligned.
+//              There are no other constexpr functions that can be called in a constexpr
+//              context. The only use for constexpr here is const-init time or ensuring
+//              that the object's value is placed in the executable at compile-time instead
+//              of having to call the ctor at static-init time. If you are using constexpr
+//              to solve the static-init order fiasco, there are other solutions for that.
+//
+// 2.
+// Description: Atomic template T must always be nothrow default constructible
+// Reasoning  : If stores are always noexcept then your constructor should not be
+//              doing anything crazy either.
+//
+// 3.
+// Description: Atomics are always lock free
+// Reasoning  : We don't want people to fall into performance traps where implicit locking
+//              is done. If your user-defined type is too large to be supported by atomic
+//              instructions, then your user code should do the locking.
+//
+// 4.
+// Description: Atomic objects cannot be volatile
+// Reasoning  : Volatile objects do not make sense in the context of eastl::atomic.
+//              Use the given memory orders to get the ordering you need.
+//              Atomic objects have to become visible on the bus.
+//              See below for details.
+//
+// 5.
+// Description: Consume memory order is not supported
+// Reasoning  : See below for the reasoning.
+//
+// 6.
+// Description: ATOMIC_INIT() macros and the ATOMIC_LOCK_FREE macros are not implemented
+// Reasoning  : Use the is_lock_free() method instead of the macros.
+//              ATOMIC_INIT() macros aren't needed since the default constructor value-initializes.
+//
+// 7.
+// Description: compare_exchange failure memory order cannot be stronger than success memory order
+// Reasoning  : Besides the argument that it ideologically does not make sense for a failure
+//              of the atomic operation to have a stricter ordering guarantee than the
+//              success of it; if that is required then just make the whole operation stronger.
+//              This ability was only added in C++17, which makes supporting multiple
+//              C++ versions harder when using the compiler-provided intrinsics since their behaviour
+//              is reliant on the C++ version being compiled. It also makes it harder to reason about code
+//              using these atomic ops since the behaviour varies across C++ versions. We have also noticed
+//              that versions of compilers that say they support C++17 do not properly adhere to this
+//              new requirement in their intrinsics. Thus we will not support this.
+//
+// 8.
+// Description: All memory orders are distinct types instead of enum values
+// Reasoning  : This will not affect how the API is used in user code.
+//              It allows us to statically assert on invalid memory orders since they are compile-time types
+//              instead of potentially runtime enum values.
+//              It also allows for more efficient code gen without the use of switch statements or if-else conditionals
+//              on the memory order enum values on compilers that do not provide intrinsics that take in a
+//              memory order, such as MSVC, especially in debug and debug-opt builds.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// ******** DISCLAIMER ********
+//
+// This documentation is not meant to provide rigorous proofs of the memory models
+// of specific architectures or the C++ memory model introduced in C++11. It is not
+// meant to provide formal mathematical definitions and logic that shows that a given
+// implementation adheres to the C++ memory model. This isn't meant to be some infallible
+// oracle on memory models, barriers, observers, and architecture implementation details.
+// What I do hope a reader gets out of this is the following: an understanding of the C++
+// memory model and how that relates to implementations on various architectures; the various
+// phenomena and ways that compilers and architectures can steer away from a sequentially
+// consistent system; and examples of how to use this library with common patterns
+// that will be seen in many code bases. Lastly, I would like to provide insight and
+// further reading into the lesser-known topics that aren't shared outside the people
+// who live in this space, and why certain things are done the way they are,
+// such as cumulativity of memory barriers as one example. Sometimes specifying barriers
+// as LDLD/LDST/STST/STLD doesn't actually cut it, and finer-grained semantics are needed
+// to describe cumulativity of memory barriers.
+//
+// ******** Layout of the Documentation ********
+//
+// This document will first go through a variety of different hardware architectures with examples of the various kinds of
+// reordering that are allowed by these architectures.
+// We will use the memory barriers provided by the hardware to "fix" these
+// examples.
+// Then we will introduce the C++ memory model and revisit the examples using the platform-agnostic abstract memory model to "fix"
+// them.
+// The hope here is that we get a sense of the various types of architectures and weak memory consistency provided by them and thus
+// an appreciation for the design of the C++ abstract memory model.
+//
+// ******** REFERENCES ********
+// [1] Dekker's mutual exclusion algorithm made RW-safe
+// [2] Handling Memory Ordering in Multithreaded Applications with Oracle Solaris
+// [3] Evaluating the Cost of Atomic Operations on Modern Architectures
+// [4] A Tutorial Introduction to the ARM and POWER Relaxed Memory Models
+// [5] Memory Barriers: a Hardware View for Software Hackers
+// [6] Memory Model = Instruction Reordering + Store Atomicity
+// [7] ArMOR: Defending Against Memory Consistency Model Mismatches in Heterogeneous Architectures
+// [8] Weak Memory Models: Balancing Definitional Simplicity and Implementation Flexibility
+// [9] Repairing Sequential Consistency in C/C++11
+// [10] A high-level operational semantics for hardware weak memory models
+// [11] x86-TSO: A Rigorous and Usable Programmer's Model for x86 Multiprocessors
+// [12] Simplifying ARM Concurrency: Multicopy-Atomic Axiomatic and Operational Models for ARMv8
+// [13] Mixed-size Concurrency: ARM, POWER, C/C++11, and SC
+// [14] P0668R4: Revising the C++ memory model
+// [15] Constructing a Weak Memory Model
+// [16] The Superfluous Load Queue
+// [17] P0190R1: Proposal for New memory_order_consume Definition
+//
+// ******** What does it mean to be Atomic? ********
+//
+// The word atomic has been overloaded and can mean a lot of different things depending on the context,
+// so let's digest it.
+//
+// The first attribute for something to be atomic is that concurrent stores and loads
+// must not tear or shear. This means if two threads write 0x01 and 0x02 at the same time
+// then the only values that should ever be observed are 0x01 or 0x02. We can only see
+// the whole write of 0x01 or 0x02, not 0x03 as an example. Many algorithms rely on
+// this property; only very few, such as Dekker's algorithm for mutual exclusion, don't.
+// Well, actually, a recent paper, [1], showed that Dekker's isn't safe without atomic
+// loads and stores, so this property is pretty fundamental; it is also hard to prove that
+// your algorithm is safe without this property on loads and stores.
+//
+// We need to ensure the compiler emits a single load instruction.
+// If we are doing 64-bit loads on a 32-bit platform, we need to ensure the load is one
+// instruction instead of two 32-bit loads into two registers.
+// Another example is if we have this struct, struct { int32_t i; int32_t k; }, even on
+// a 64-bit system we have to ensure the compiler does one 64-bit load and not two
+// 32-bit loads for each individual member.
+//
+// We also need to ensure the correct instruction is emitted. A general load instruction
+// to do a 64-bit load on a 32-bit platform may perform a 64-bit load but it may not
+// be atomic; it may be turned into two 32-bit loads behind the scenes in the cpu.
+// For example on ARMv7 we would have to use ldrexd, not ldrd, for 64-bit loads
+// on a 32-bit ARMv7 core.
+//
+// An operation may be considered atomic if multiple sub-operations are done as one
+// transactional unit. This is commonly known as a Read-Modify-Write, RMW, operation.
+// Take a simple add operation; it is actually a load from memory into a register,
+// a modification of said register and then a store back to memory. If two threads
+// concurrently execute this add operation on the same memory location, any interleaving
+// of the 3 sub-operations is possible. It is possible that if the initial value is 0,
+// the result may be 1 because each thread executed in lockstep, both loading 0, adding 1
+// and then storing 1. An RMW operation may be considered atomic if the whole sequence of
+// sub-operations is serialized as one transactional unit.
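+//
+// To make the RMW discussion above concrete, here is a small illustrative sketch (added for
+// illustration and not part of the original text; the counter names are arbitrary) contrasting
+// a plain, racy increment with an atomic RMW:
+//
+//     int plainCounter = 0;                 // shared between two threads
+//     eastl::atomic<int> atomicCounter{};   // shared between two threads, initially 0
+//
+//     // Executed concurrently by both threads:
+//     ++plainCounter;                // separate load, add and store; the threads can interleave
+//                                    // between the sub-operations and an increment can be lost.
+//     atomicCounter.fetch_add(1);    // the load-add-store sequence is one atomic RMW,
+//                                    // so no increment is ever lost.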
+//
+// Atomicity may also refer to the order in which memory operations are observed and the
+// dependencies between memory operations to different memory locations. As a quick example
+// of the very thing we will be deep diving into, and one that is not very intuitive: if I do
+// [STORE(A, 2); STORE(B, 1);] in one thread and another thread does [r0 = LOAD(B); r1 = LOAD(A);],
+// then if r0 == 1, meaning we observed the store to B, will we also observe r1 == 2?
+// Our intuition tells us that A was stored first and then B, so if I read the new value
+// of B then I must also read the new value of A; since the store to A happened before B,
+// if I can see B then I must be able to see everything before B, which includes A.
+// This highlights the ordering of memory operations and why memory barriers and memory
+// models are so heavily attached to atomic operations, because one could classify something
+// as atomic if the dependency highlighted in the above example is allowed to be maintained.
+//
+// This is what people mean when you hear that volatile does NOT mean atomicity of the operation.
+// Usually people imply a lot of implicit assumptions when they mark a variable as volatile.
+// All volatile gives us is the ability to tell the compiler it may not assume anything
+// about the state of that memory location. This means the compiler must always emit a load
+// or store instruction, cannot perform constant folding, dead-store elimination, or
+// do any sort of code movement on volatile variables.
+//
+// ******** Preliminary Basics ********
+//
+// It is expected that the reader understands what a cache is, how it is organized and how data
+// is chunked into cachelines. It is helpful if the reader understands basic cache coherency
+// protocols such as MSI or MESI.
+// It is expected the reader understands alignment, especially natural alignment
+// of the processor and why alignment is important for data access.
+// The reader should have some understanding of how a processor executes instructions,
+// basics of what Out-of-Order execution means and basics of what speculative execution means.
+// It is expected that the reader has an understanding of threading, multi-threaded programming
+// and the use of concurrency primitives such as mutexes.
+// Memory Barrier, Barrier, Memory Fence and Fence are all interchangeable synonyms.
+//
+// Independent memory operations can be performed or observed, depending on your perspective,
+// in any order as long as the local cpu thinks its execution is happening in program order.
+// This can be a problem for inter-cpu communications and thus we need some way to enforce
+// that the compiler does not reorder instructions and that the cpu also does not reorder
+// instructions. This is what a barrier is: an enforcement of ordering on memory instructions,
+// so, as the name suggests, a barrier.
+// Barriers can be one-sided or both-sided, which means
+// the barrier enforces a partial order above or below or on both sides of said barrier.
+//
+// Processors will use tricks such as out-of-order execution, memory instruction buffering and
+// combining, speculative loads and speculative execution, branch prediction and many types of caching even
+// in various interconnects from the cpu to the memory itself. One key thing to note is that cpus
+// do not physically reorder the instruction stream. Instructions are dispatched and retired
+// in-order but executed out-of-order. Memory barriers will prevent these tricks from happening
+// by controlling the interaction of multiple cpus.
+//
+// Compilers will morph your code and physically move instructions around as long as the program
+// has the same observed behaviour. This is becoming increasingly true with more optimization techniques
+// such as Link Time Optimization becoming the norm; where once people assumed compilers couldn't reason about
+// anything outside the given TU, they now have the whole-program view and know everything.
+// This means the compiler does indeed alter the instruction stream,
+// and compiler barriers are a way to tell it not to move any memory instructions across the barrier.
+// This does not prevent a compiler from doing optimizations such as constant folding, merging of
+// overlapping loads, or even dead store elimination. Compiler barriers are also very cheap and
+// have zero impact on anything that the compiler knows isn't visible in memory, such as local variables
+// whose addresses do not escape the function even if their address is taken. You can think of it
+// in terms of a sequence point as used with "volatile" qualified variables to denote a place in code where
+// things must be stable and the compiler doesn't cache any variables in registers or do any reordering.
+//
+// Memory Barriers come in many flavours that instill a partial or full ordering on memory operations.
+// Some memory operations themselves have implicit ordering guarantees already; for example,
+// Total-Store Order, TSO, architectures like x86 guarantee that a store operation cannot be reordered with a
+// previous store operation, thus a memory barrier that only orders stores is not needed
+// on this architecture other than ensuring the compiler doesn't do any shenanigans.
+// Considering we have 4 permutations of memory operations, a common way to describe an ordering
+// is via Load-Load/LDLD, Load-Store/LDST, Store-Store/STST or Store-Load/STLD notation. You read this
+// notation as follows: an STLD memory barrier means a load cannot be reordered with a previous store.
+// For example, on a TSO architecture we can say all stores provide an STST memory barrier,
+// since a store cannot be reordered with a previous store.
+//
+// Memory barriers are not in themselves a magic bullet; they come with caveats that must be known.
+// Each cpu architecture also has its own flavours and guarantees provided by said memory barriers.
+// There is no guarantee that memory instructions specified before a memory barrier will complete,
+// be written to memory or be fully propagated throughout the rest of the system, when the memory barrier
+// instruction completes. The memory barrier creates a point in the local cpu's queue of memory instructions
+// that they must not cross. There is no guarantee that using a memory barrier on one cpu will have
+// any effect at all on another remote cpu's observed view of memory. This also implies that executing
+// a memory barrier does not hinder, stall, or force any other cpus to serialize with each other.
+// In order for a remote cpu to observe the correct effects it must also use a matching memory barrier.
+// This means code communicating between 2 threads through memory must employ memory barriers on both sides.
+// For example, a store memory barrier that only orders stores, STST, in one thread must be paired with a load memory barrier
+// that only orders loads, LDLD, in the other thread trying to observe those stores in the correct order.
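+//
+// Expressed with this library, such a pairing could look as follows. This is an added,
+// illustrative sketch (the 'data' and 'flag' variables are arbitrary names and both are
+// assumed to be shared between the two threads); it pairs standalone fences with
+// relaxed atomic accesses:
+//
+//     eastl::atomic<int> data{};
+//     eastl::atomic<int> flag{};
+//
+//     // Thread 0
+//     data.store(42, eastl::memory_order_relaxed);
+//     eastl::atomic_thread_fence(eastl::memory_order_release);  // orders the store to data before the store to flag
+//     flag.store(1, eastl::memory_order_relaxed);
+//
+//     // Thread 1
+//     while (flag.load(eastl::memory_order_relaxed) == 0) {}
+//     eastl::atomic_thread_fence(eastl::memory_order_acquire);  // pairs with the release fence above
+//     int r = data.load(eastl::memory_order_relaxed);           // guaranteed to observe 42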
+//
+// ******** Memory Types && Devices ********
+//
+// eastl::atomic and accompanying memory barriers ONLY ORDER MEMORY for cpu-to-cpu communication through whatever the
+// processor designates as normal cacheable memory. It does not order memory to devices. It does not provide any DMA ordering guarantees.
+// It does not order memory with other memory types such as Write Combining. It strictly orders memory only to shared memory that is used
+// to communicate between cpus.
+//
+// ******** Sequentially Consistent Machine ********
+//
+// The most intuitive model, as well as the one people naturally expect a concurrent system to have, is Sequential Consistency.
+// You have most likely heard this term if you have dealt with any type of distributed system. Lamport's definition
+// articulates this consistency model the best.
+// Leslie Lamport: "the result of any execution is the same as if the operations of all the processors were executed in some
+//                  sequential order, and the operations of each individual processor appear in this sequence in the order
+//                  specified by its program".
+//
+// A Sequentially Consistent machine is modelled as follows:
+//
+//              ------------                  ------------
+//             |  Thread 0  |      ...       |  Thread N  |
+//              ------------                  ------------
+//                 |     |                      |     |
+//                 |     |                      |     |
+//              ----------------------------------------
+//             |                                        |
+//             |              Shared Memory             |
+//             |                                        |
+//              ----------------------------------------
+//
+// This is a sequentially consistent machine. Each thread executes instructions in program order, performing loads and stores
+// that are serialized in some order to the shared memory. This means all communication is done through the shared memory, with one cpu
+// doing one access at a time. This system has a couple of key properties.
+//
+// 1. There is no local cpu memory reordering. Each cpu executes instructions in program order and all loads and stores must complete,
+//    be visible in the shared memory or be visible in a register before starting the next instruction.
+// 2. Each memory operation becomes visible to all cpus at the same time. If a store hits the shared memory, then all subsequent loads
+//    from every other cpu will always see the latest store.
+//
+// A Sequentially Consistent machine has Single-Copy Store Atomicity: all stores must become visible to all cores in the system at the same time.
+//
+// ******** Adding Caches ********
+//
+// Caches by nature implicitly add the potential for memory reordering. A centralized shared snoopy bus that we all learned about in school
+// makes it easy to implement sequential consistency with caches. Writes and reads are all serialized in a total order via the cache bus transaction
+// ordering. Every modern-day bus is not in-order, and most certainly not a shared centralized bus. Cache coherency guarantees that all memory operations
+// will be propagated eventually to all parties, but it doesn't guarantee in what order or in what time frame.
+// Once you add
+// caches, various levels of caching and various interconnects between remote cpus, you inevitably run into the issue where
+// some cpus observe the effects of a store before other cpus. Obviously we have weakly-ordered and strongly-ordered cpus with
+// caches, so why is that? The short answer is: where is the onus put, on the programmer or the hardware? Does the hardware
+// have dependency tracking? Is it able to determine when a memory order violation occurs, such as by rolling back its speculative execution?
+// And how far along the chain of interconnects does the hardware wait before it determines that the memory operation has
+// been acknowledged or is considered to satisfy its memory ordering guarantees? Again this is a very high-level view of the system
+// as a whole, but the takeaway is yes; caches do add the potential for reordering, but other supporting hardware determines whether
+// that is observable by the programmer. There is also some debate about whether weakly-ordered processors are actually more performant
+// than strongly-ordered cpus, alluding to the fact that the hardware has a better picture of what is a violation, versus the programmer
+// having to emit far more barriers on weakly-ordered architectures in multi-threaded code which may actually not be needed because the
+// hardware didn't commit a violation; but it may have, and we as programmers cannot rely on may-haves.
+//
+// ******** Store Buffers ********
+//
+// Obviously having all stores serialize results in unnecessary stalls. Store buffers alleviate this issue.
+// Store buffers are simple fixed-size structures that sit between the cpu and the memory hierarchy. This allows
+// each cpu to record its write in the store buffer and then move on to the next instruction. The store buffer will
+// eventually be flushed to the resulting memory hierarchy in FIFO order. How and when this flushing occurs is irrelevant to the
+// understanding of a store buffer. A read from an address will grab the most recent write to the same address in the store buffer.
+//
+// The introduction of a store buffer is our first dive into weaker memory consistency. The addition of this hardware makes the consistency model weaker,
+// turning it into one that is commonly known as TSO, Total-Store Order. This is the exact model used by x86 cpus and we will see what this means
+// and what new effects are observed with the addition of the store buffer. Below is a diagram of how the machine may now look.
+// This type of store buffer is known as a FIFO store buffer, FIFO write buffer, or Load/Store Queue in some literature. This type of
+// store buffer introduces STLD reordering but still prevents STST reordering. We will take a look at another type of store buffer later.
+// Even with this store buffer, stores to the same address can still be merged so that only the latest store is written to the cache, assuming
+// no other intermediary stores happen. x86 cpus do write merging even for consecutive stores, i.e. storing to A and A+1 can be merged into one two-byte store.
+//
+//              ------------                  ------------
+//             |  Thread 0  |      ...       |  Thread N  |
+//              ------------                  ------------
+//                 |     |                      |     |
+//                 |     |                      |     |
+//              | Store  |                   | Store  |
+//              | Buffer |                   | Buffer |
+//                 |     |                      |     |
+//              ----------------------------------------
+//             |                                        |
+//             |              Shared Memory             |
+//             |                                        |
+//              ----------------------------------------
+//
+// ---- Store-Buffering / Dekker's Example ----
+// This is a very common litmus test that showcases the introduction of STLD reordering.
It is called Store-Buffering example because it is the only weaker +// behaviour observed under TSO and also called Dekker's Example as it famously breaks Dekker's mutual exclusion algorithm. +// +// --------------------------- +// Initial State: +// x = 0; y = 0; +// --------------------------- +// Thread 0 | Thread 1 +// --------------------------- +// STORE(x, 1) | STORE(y, 1) +// r0 = LOAD(y) | r1 = LOAD(x) +// --------------------------- +// Observed: r0 = 0 && r1 = 0 +// --------------------------- +// +// We would normally assume that any interleaving of the two threads cannot possibly end up with both loads reading 0. We assume that the observed outcome +// of r0 = 0 && r1 = 0 to be impossible, clearly that is not the case. Let's start by understanding the example with no reordering possible. Both threads +// run and their first instruction is to write the value 1 into either x or y, the next instruction then loads from the opposite variable. This means no +// matter the interleaving, one of the loads always executes after the other thread's store to that variable. +// We could observe r0 = 1 && r1 = 1 if both threads execute in lockstep. +// We could observe r0 = 0 && r1 = 1 if thread 0 executes and then thread 1 executes. +// We could observe r0 = 1 && r1 = 0 if thread 1 executes and then thread 0 executes. +// Since the stores always execute before that load in the other thread, one thread must always at least observe a store, so let's see why store buffers break this. +// +// What will happen is that STORE(x, 1) is stored to the store buffer but not made globally visible yet. +// STORE(y, 1) is written to the store buffer and also is not made globally visible yet. +// Both loads now read the initial state of x and y which is 0. We got the r0 = 0 && r1 = 0 outcome and just observed a Store-Load reordering. +// It has appeared as if the loads have been reordered with the previous stores and thus executed before the stores. +// Notice even if we execute the instructions in order, a series of other hardware side effects made it appear as if the instructions have been reordered. +// We can solve this by placing a Store-Load barrier after the store and before the load as follows. +// +// --------------------------- +// Thread 0 | Thread 1 +// --------------------------- +// STORE(x, 1) | STORE(y, 1) +// STLD BARRIER | STLD BARRIER +// r0 = LOAD(y) | r1 = LOAD(x) +// --------------------------- +// +// This STLD barrier effectively will flush the store buffer into the memory hierarchy ensuring all stores in the buffer are visible to all other cpus at the same time +// before executing the load instruction. Again nothing prevents a potenital hardware from speculatively executing the load even with the STLD barrier, the hardware will have to do +// a proper rollback if it detected a memory order violation otherwise it can continue on with its speculative load. The barrier just delimits a stability point. +// +// Most hardware does not provide granular barrier semenatics such as STLD. Most provide a write memory barrier which only orders stores, STST, a read memory barrier +// which only orders loads, LDLD, and then a full memory barrier which is all 4 permutations. So on x86 we will have to use the mfence, memory fence, instruction +// which is a full memory barrier to get our desired STLD requirements. +// +// TSO also has the property that we call, Multi-Copy Store Atomicity. 
This means a cpu sees its own stores before they become visible to other cpus, +// by forwarding them from the store buffer, but a store becomes visible to all other cpus at the same time when flushed from the store buffer. +// +// +// Let's look at a non-FIFO store buffer now as seen in ARM cpus as an example and we will use a standard Message Passing example to see how it manifests in even weaker consistency. +// A store buffer on ARM as an example allows write merging even with adjacent stores, is not a FIFO queue, any stores in the small hardware hash table may be ejected at any point +// due to a collision eviction or the availability of cache lines in the cache hierarchy meaning that stores may bypass the buffer entirely if that cacheline is already owned by that cpu. +// There is no guarantee that stores will be completed in order as in the FIFO case. +// +// --------------------------- +// Inital State: +// x = 0; y = 0; +// --------------------------- +// Thread 0 | Thread 1 +// --------------------------- +// STORE(x, 1) | while(LOAD(y) == 0); +// STORE(y, 1) | r0 = LOAD(x) +// --------------------------- +// Observed: r0 = 0 +// --------------------------- +// +// This is a classic Message Passing example that is very commonly used in production code. We store some values and then set a flag, STORE(y, 1) in this case. +// The other thread waits until the flag is observed and then reads the value out of x. If we observed the flag then we should obviously see all stores before the flag was set. +// Given our familiarity with TSO consistency above we know this definitely works on TSO and it is impossible to observe the load of x returning 0 under that consistency model. +// Let's see how this breaks with a non-FIFO store buffer. +// +// Thread 0 executes the STORE(x, 1) but the cacheline for x is not in thread 0's cache so we write to the store buffer and wait for the cacheline. +// Thread 1 executes the LOAD(y) and it also does not have y in its cacheline so it waits before completeing the load. +// Thread 0 moves on to STORE(y, 1). It owns this cacheline, hypothetically, so it may bypass the store buffer and store directly to the cache. +// Thread 0 receives message that Thread 1 needs y's cacheline, so it transfers the now modified cacheline to Thread 1. +// Thread 1 completes the load with the updated value of y = 1 and branches out of the while loop since we saw the new value of y. +// Thread 1 executes LOAD(x) which will return 0 since Thread 0 still hasn't flushed its store buffer waiting for x's cacheline. +// Thread 0 receives x's cacheline and now flushes x = 1 to the cache. Thread 1 will also have invalidated its cacheline for x that it brought in via the previous load. +// +// We have now fallen victim to STST reordering, allowing Thread 1 to observe a load of x returning 0. Not only does this store buffer allow STLD reording due to the nature of +// buffering stores, but it also allows another reordering; that of Store-Store reordering. It was observed as if Thread 0 executed STORE(y, 1) before STORE(x, 1) which completely +// broke our simple message passing scenario. +// +// --------------------------- +// Thread 0 | Thread 1 +// --------------------------- +// STORE(x, 1) | while(LOAD(y) == 0); +// STST BARRIER | +// STORE(y, 1) | r0 = LOAD(x) +// --------------------------- +// +// The STST memory barrier effectively ensures that the cpu will flush its store buffer before executing any subsequent stores. 
That is not entirely true, the cpu is still allowed +// to continue and execute stores to the store buffer as long as it doesn't flush them to the cache before the previous stores are flushed to the cache. If nothing becomes +// globally visible out of order then we are good. +// The example above will change how the processor executes due to the STST memory barrier. Thread 0 will execute STORE(y, 1), write to the store buffer and mark all current entries. Even though it owns the cacheline +// it cannot write the store to the cache until all marked entries, which are all the previous stores, are flushed to the cache. We have now fixed the message passing code by adding +// a STST or write memory barrier and thus it is no longer possible to observe the load of x returning 0. +// +// ******** Invalidation Queues ******** +// +// Due to the cache coherency protocol in play, a write to a cacheline will have to send invalidation messages to all other cpus that may have that cacheline as well. +// Immediately executing and responding to invalidation messages can cause quite a stall especially if the cache is busy at the moment with other requests. +// The longer we wait to invalidate the cacheline, the longer the remote cpu doing the write is stalled waiting on us. We don't like this very much. +// Invalidation Queues are just that, we queue up the action of actually invalidating the cache line but immediately respond to the request saying we did it anyway. +// Now the remote cpu thinks we invalidated said cacheline but actually it may very well still be in our cache ready to be read from. We just got weaker again, let's +// see how this manifests in code by starting from the end of our previous example. +// +// --------------------------- +// Inital State: +// x = 0; y = 0; +// --------------------------- +// Thread 0 | Thread 1 +// --------------------------- +// STORE(x, 1) | while(LOAD(y) == 0); +// STST BARRIER | +// STORE(y, 1) | r0 = LOAD(x) +// --------------------------- +// Observed: r0 = 0 +// --------------------------- +// +// Thread 1 receives the invalidate x's cacheline message and queues it because it is busy. +// Thread 1 receives the invalidate y's cacheline message, but we don't have that cacheline so acknowledge immediately. +// Thread 1 executes LOAD(y), loads in y's cacheline and branches out of the loop. +// Thread 1 executes LOAD(x), and loads from the cache the old value of x because the invalidation message is still sitting in the invalidation queue. +// +// We have just again observed the load of x returning 0 but from a different type of reordering now on the reader side. +// This is a form of LDLD, Load-Load, reordering as it appears as if LOAD(x) was executed before LOAD(y). This can be fixed as follows. +// +// --------------------------- +// Thread 0 | Thread 1 +// --------------------------- +// STORE(x, 1) | while(LOAD(y) == 0); +// STST BARRIER | LDLD BARRIER +// STORE(y, 1) | r0 = LOAD(x) +// --------------------------- +// +// The LDLD memory barrier essentially marks all entries currently in the invalidation queue. Any subsequent load must wait until all the marked entries have been +// processed. This ensures once we observe y = 1, we process all entries that came before y and that way we observe all the stores that happened before y. +// The insertion of the read memory barrier creates the required memory barrier pairing as discussed above and ensures that now our code executes as expected. 
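+//
+// To make the barrier pairing above concrete, here is a minimal C++ sketch of the same message passing idiom using
+// standard C++11 fences (std::atomic is used so the snippet stands alone; eastl::atomic provides equivalent standalone
+// fences, discussed further below). The release fence plays the role of the STST barrier on the writer and the acquire
+// fence plays the role of the LDLD barrier on the reader. The identifiers gData and gFlag are illustrative only.
+//
+// #include <atomic>
+//
+// int gData = 0;
+// std::atomic<int> gFlag{0};
+//
+// void Writer() // Thread 0
+// {
+//     gData = 1;                                           // the payload
+//     std::atomic_thread_fence(std::memory_order_release); // plays the role of the STST barrier above
+//     gFlag.store(1, std::memory_order_relaxed);           // publish the flag
+// }
+//
+// void Reader() // Thread 1
+// {
+//     while (gFlag.load(std::memory_order_relaxed) == 0) {} // wait for the flag
+//     std::atomic_thread_fence(std::memory_order_acquire);  // plays the role of the LDLD barrier above
+//     int r0 = gData;                                        // guaranteed to observe 1
+//     (void)r0;
+// }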
+//
+// It must be made clear that these are not the only hardware structures or mechanisms that can relax STST, STLD and LDLD orderings. These are merely
+// two structures that are common and that I chose to use as examples of how hardware can reduce ordering guarantees. Knowing exactly how the hardware does this
+// isn't always clear, but having a model that tells us which operations can be reordered is all we need to reason about our code when executing on that hardware.
+//
+// ******** Load Buffering ********
+//
+// The analog of the Store-Buffering example, this litmus test has two threads read from two different locations and then write to the other location.
+// The outcome below requires LDST reordering, which is allowed and observable on many processors such as ARM.
+//
+// ---------------------------
+// Initial State:
+// x = 0; y = 0;
+// ---------------------------
+// Thread 0 | Thread 1
+// ---------------------------
+// r0 = LOAD(x) | r1 = LOAD(y)
+// STORE(y, 1) | STORE(x, 1)
+// ---------------------------
+// Observed: r0 = 1 && r1 = 1
+// ---------------------------
+//
+// This is possible because the processor does not have to wait for the other cpu's cacheline to arrive before storing into the cache.
+// Assume Thread 0 owns y's cacheline and Thread 1 owns x's cacheline.
+// Each processor may execute its load and buffer it while waiting for the cacheline to arrive.
+// Each processor may then continue on to its store and, since each cpu owns its respective cacheline, store the result into the cache.
+// The cpus now receive the cachelines for x and y with the modified values.
+// We have just observed both loads returning 1 and thus observed LDST reordering.
+//
+// To forbid such an outcome it suffices to add a full memory barrier to both threads, or a local Read-After-Write/Read-To-Write data dependency, or a control dependency.
+//
+// -------------------------------
+// Thread 0 | Thread 1
+// -------------------------------
+// r0 = LOAD(x) | r1 = LOAD(y)
+// if (r0 == 1) | if (r1 == 1)
+// STORE(y, 1) | STORE(x, 1)
+// -------------------------------
+//
+// -----------------------------------------------------
+// Thread 0 | Thread 1
+// -----------------------------------------------------
+// r0 = LOAD(x) | r1 = LOAD(y)
+// STORE(&(y + r0 - r0), 1) | STORE(&(x + r1 - r1), 1)
+// -----------------------------------------------------
+//
+// Both fixes above ensure that neither write can be committed, that is made globally visible, until the read that precedes it in program order has been fully satisfied.
+//
+// ******** Compiler Barriers ********
+//
+// Compiler barriers are two-sided barriers that prevent loads and stores from moving down past the compiler barrier and
+// loads and stores from moving up above the compiler barrier. Here we will see the various ways our code may be subject
+// to compiler optimizations and why compiler barriers are needed. Note, as stated above, compiler barriers may not
+// prevent all compiler optimizations or transformations. Compiler barriers are usually implemented by reloading all
+// variables that are currently cached in registers and flushing all stores held in registers back to memory.
+// This list isn't exhaustive, but it outlines what compiler barriers protect against and what they don't.
+//
+// The compiler may reorder loads.
+//
+// LOAD A; LOAD B; -> LOAD B; LOAD A;
+// LOAD A; operation on A; LOAD B; operation on B; -> LOAD A; LOAD B; operation on A; operation on B;
+//
+// Insert a compiler barrier in between the two loads to guarantee that they are kept in order.
+// LOAD A; COMPILER_BARRIER; LOAD B;
+// LOAD A; operation on A; COMPILER_BARRIER; LOAD B; operation on B;
+//
+// The same goes for stores.
+// STORE(A, 1); STORE(B, 1); -> STORE(B, 1); STORE(A, 1);
+// operations and STORE result into A; operations and STORE result into B; -> all operations; STORE result into B; STORE result into A;
+//
+// Insert a compiler barrier in between the two stores to guarantee that they are kept in order.
+// Note that the barrier does not guarantee that multiple stores to A before the barrier won't be merged into one final store.
+// It also does not require that the store to B after the barrier be written to memory; it may be cached in a register for some indeterminate
+// amount of time, as an example.
+// STORE(A, 1); COMPILER_BARRIER; STORE(B, 1);
+//
+// The compiler is allowed to merge overlapping loads and stores.
+// Inserting a compiler barrier here will not prevent the compiler from doing this optimization, as doing one wider load/store is
+// technically still abiding by the guarantee that the loads/stores are not reordered with each other.
+// LOAD A[0]; LOAD A[1]; -> A single wider LOAD instruction
+// STORE(A[0], 1); STORE(A[1], 2); -> A single wider STORE instruction
+//
+// Compilers do not have to reload the values that pointers point to. This is especially common on RISC architectures with lots
+// of general purpose registers, or with compiler optimizations such as inlining or Link-Time Optimization.
+// int i = *ptr; Do bunch of operations; if (*ptr) { do more; }
+// It is entirely possible the compiler removes the last if statement, because it can keep *ptr in a register
+// and may infer from the operations done on i that i is never 0.
+//
+// int i = *ptr; Do bunch of operations; COMPILER_BARRIER; if (*ptr) { do more; }
+// Inserting a compiler barrier at that location forces the compiler to reload *ptr, thus keeping the if statement, assuming
+// no other optimizations take place, such as the compiler knowing that *ptr is always greater than 0.
+//
+// The compiler is also within its rights to merge and reload loads as much as it pleases.
+//
+// while (int tmp = LOAD(A))
+// process_tmp(tmp)
+//
+// may be transformed into
+//
+// if (int tmp = LOAD(A))
+// for (;;) process_tmp(tmp)
+//
+// Inserting a compiler barrier ensures that LOAD(A) is always reloaded and thus the unwanted transformation is avoided.
+//
+// while (int tmp = LOAD(A))
+// {
+// process_tmp(tmp)
+// COMPILER_BARRIER
+// }
+//
+// Under heavy register pressure, say the loop body was larger, the compiler may instead reload A as follows.
+// Compiler barriers cannot prevent this from happening, even with the barrier after process_tmp as above;
+// the compiler still kept those loads above the barrier, so it satisfied its contract even though it reloaded
+// from A more than once.
+//
+// while (int tmp = LOAD(A))
+// process_tmp(LOAD(A))
+//
+// In the above transformation it is possible that another cpu stores 0 into A. When we reload A for process_tmp, we pass 0
+// to process_tmp(), which it would never expect to observe, because if we had observed 0 the while loop condition
+// would never have been satisfied.
+// If the compiler, under register pressure, instead stored and loaded tmp from its stack slot, that would be fine,
+// because we are just storing and loading the originally observed value of A. Obviously that is slower than just reloading from
+// A again, so an optimizing compiler may not do the stack slot store. This is an unwanted transformation which eastl::atomic prevents
+// even on relaxed loads.
+//
+// The compiler is allowed to do dead-store elimination if it knows that the value has already been stored, or that only the last store
+// needs to be kept. The compiler does not assume or know that these variables are shared variables.
+//
+// STORE(A, 1); STORE(A, 1);
+// OPERATIONS; -> OPERATIONS;
+// STORE(A, 1);
+//
+// The compiler is well within its rights to omit the second store to A. Assuming we are doing some fancy lockfree communication
+// with another cpu and the last store is meant to ensure the ending value is 1 even if another cpu changed A in between, that
+// assumption will not be satisfied. A compiler barrier will not prevent the last store from being removed as a dead store.
+//
+// STORE(A, 1);
+// OPERATIONS;
+// STORE(A, 2);
+//
+// Assume these stores are meant to denote state changes that communicate with a remote cpu. The compiler is allowed to
+// transform this as follows without a compiler barrier. Insert a compiler barrier between the two stores to prevent the transformation.
+// Something like this will also require memory barriers, but that is not the point of this section.
+//
+// STORE(A, 2);
+// OPERATIONS;
+//
+// The compiler is also allowed to invent stores as it may please.
+// On many RISC architectures, storing an immediate value involves either loading the immediate from the .data section
+// or combining load-upper-immediate and add-immediate/or-immediate instructions to get the constant into a register, and then
+// doing a single 32-bit store instruction from said register. Some ISAs have 16-bit store-immediate instructions, so a 32-bit store
+// may be broken into two 16-bit immediate stores, causing shearing. To reduce instruction dependencies the compiler may also decide
+// to build the two halves separately and then do two 16-bit stores, again causing shearing.
+//
+// lui $t0, 1 # t0 == 0x00010000
+// ori $t0, $t0, 8 # t0 == 0x00010008
+// strw $t0, 0($a1) # store t0 into the address in a1
+// ->
+// ori $t0, $zero, 8 # t0 == 0x00000008, the lower half
+// ori $t1, $zero, 1 # t1 == 0x00000001, the upper half
+// strhw $t0, 0($a1) # store the lower half at a1
+// strhw $t1, 2($a1) # store the upper half at a1 + 2
+//
+// The above shows a potential transformation that a compiler barrier cannot solve for us.
+//
+// A compiler may also introduce stores to save on branching. Let's see.
+//
+// if (a)
+// STORE(X, 10);
+// else
+// STORE(X, 20);
+// ->
+// STORE(X, 20);
+// if (a)
+// STORE(X, 10);
+//
+// This is a very common optimization because it saves a potentially more expensive branch instruction, but it breaks multi-threaded code.
+// This is another case where a compiler barrier doesn't give us the granularity we need.
+// The branches may even be removed completely, with the compiler instead choosing conditional move operations, which would
+// actually be compliant since only one store would be done and no extra store would be invented.
+//
+// You are probably thinking by now that compiler barriers are useful and definitely needed to tell the compiler to calm down
+// and to keep our hardware-level reasoning valid by ensuring the instructions emitted match the code we wrote.
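+//
+// As a concrete illustration of the useful case, here is a minimal sketch of a compiler barrier between two stores, written
+// with the gcc/clang inline assembly clobber form (production code would normally use a library-provided macro instead; the
+// raw form and the identifiers gA/gB are shown only for illustration). The barrier keeps the two stores in order as emitted
+// by the compiler, but, as detailed above, it does nothing about hardware reordering, merged stores, dead-store elimination
+// or invented stores.
+//
+// int gA = 0;
+// int gB = 0;
+//
+// void Publish()
+// {
+//     gA = 1;
+//     asm volatile("" ::: "memory"); // compiler barrier: the store to gA cannot sink below it, the store to gB cannot hoist above it
+//     gB = 1;
+// }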
+//
+// But there are plenty of caveats, shown above, where compiler barriers do not provide the guarantees we still need.
+// This is where eastl::atomic comes into play, and under the relaxed memory ordering section it will be explained
+// what the standard guarantees and how we achieve those guarantees, like ensuring the compiler never does dead-store elimination or reloads.
+//
+// ******** Control Dependencies ********
+//
+// Control dependencies are implicit local cpu ordering of memory instructions due to branching instructions, specifically
+// conditional branches only. The problem is that compilers do not understand control dependencies, and control dependencies
+// themselves are incredibly hard to understand. This section is meant to make the reader aware that they exist and to never use them,
+// because they shouldn't be needed at all with eastl::atomic. Control dependencies are categorized as LDLD or LDST;
+// store-to-store control dependencies inherently do not make sense, since the conditional branch is driven by loading and comparing values.
+//
+// A LDLD control dependency is an anti-pattern, since it is not guaranteed that any architecture will detect the memory-order violation.
+// r0 = LOAD(A);
+// if (r0)
+// r1 = LOAD(B)
+//
+// Given that sequence of instructions, it is entirely possible that a cpu attempts to speculatively predict the branch and load the value of B
+// before the branch instruction has finished executing. It is entirely allowed for the cpu to load B before A, assuming B is in cache and A
+// is not. And even if the cpu's prediction turns out to be correct, it is allowed to keep the speculatively loaded value of B rather than
+// reload it; the two loads were effectively reordered and the cpu simply got lucky.
+//
+// This is also what the x86 pause instruction inserted into spin wait loops is meant to solve.
+// LOOP:
+// r0 = LOAD(A);
+// if (!r0) pause; goto LOOP;
+//
+// In the above spin loop, after a couple of iterations the processor will fill the pipeline with speculated cmp and load instructions.
+// x86 will catch a memory order violation if it sees that an external store was done to A, and it must then flush the entire
+// pipeline of all the speculated loads of A. The pause instruction tells the cpu not to do speculative loads, so that the pipeline is not
+// filled with all those speculative load instructions. This ensures we do not incur the costly pipeline flushes from memory order
+// violations, which are likely to occur in tight spin wait loops. It also allows other threads on the same physical core to use the
+// core's resources better, since our speculation won't be hogging them all.
+//
+// A LDST control dependency is a true dependency in which the cpu cannot make a store visible to the system and other cpus until it
+// knows its prediction is correct. Thus LDST ordering is guaranteed and can always be relied upon, as in the following example.
+//
+// r0 = LOAD(A);
+// if (r0)
+// STORE(B, 1);
+//
+// The fun part is how the compiler can actually break all of this.
+// First, if the compiler can prove that the value of A in the LDST example is always non-zero, then it is well within its
+// rights to completely remove the if statement, which would leave us with no control dependency.
+//
+// Things get more fun when we deal with conditionals with else and else if statements where the compiler might be able to employ
+// invariant code motion optimizations. Take this example.
+//
+// r0 = LOAD(A);
+// r1 = LOAD(B);
+// if (r0)
+// STORE(B, 1);
+// /* MORE CODE */
+// else if (r1)
+// STORE(B, 1);
+// /* MORE CODE */
+// else
+// STORE(B, 1);
+// /* MORE CODE */
+//
+// If we were trying to be smart and entirely rely on the control dependency to ensure order: well, just don't; the compiler
+// is always smarter. The compiler is well within its rights to hoist the STORE(B, 1) up and above all the conditionals, breaking
+// our reliance on the LDST control dependency.
+//
+// Things can get even more complicated, especially in C++, when values may come from constexpr, inline, inline constexpr, static const, etc.
+// variables; the compiler will do all sorts of transformations to reduce, remove, augment and change all your conditional code, since
+// it knows the values of the expressions, or at least parts of them, at compile time. Even more aggressive optimizations like LTO might break code that was being cautious.
+// Even adding simple short circuiting logic or your classic likely/unlikely macros can alter conditionals in ways you didn't expect.
+// In short, know enough about control dependencies to know never to use them.
+//
+// ******** Multi-Copy Store Atomicity && Barrier Cumulativity ********
+//
+// Single-Copy Store Atomicity: All stores must become visible to all cores in the system at the same time.
+//
+// Multi-Copy Store Atomicity : A cpu sees its own stores before they become visible to other cpus, by forwarding them from the store buffer,
+// but a store becomes visible to all other cpus at the same time when flushed from the store buffer.
+//
+// Non-Atomic Store Atomicity : A store becomes visible to different cpus at different times.
+//
+// Those are the variations of Store Atomicity. Most processors have Non-Atomic Store Atomicity, and thus you must program to that lowest common denominator.
+// We can use barriers, with some caveats, to restore Multi-Copy Store Atomicity to a Non-Atomic system, though we need a more granular definition for
+// memory barriers to describe this behaviour. The simple LDLD/LDST/STST/STLD definitions are not enough to categorize memory barriers at this level. Let's start off
+// with a simple example that breaks under a Non-Atomic Store Atomicity system and look at which hardware features allow this behaviour to be observed.
+//
+// NOTE: For all the below examples we assume no compiler reordering and that the processor executes the instructions with no local reorderings, to make the examples simpler and
+// to only show off the effects of Multi-Copy Store Atomicity. This is why we don't add any address dependencies or mark explicit LDLD/LDST memory barriers.
+// Thus you may assume all LDLD and LDST pairs have an address dependency between them, so that they are not reordered by the compiler or the local cpu.
+// +// --------------------------------------------------------------------------------------------------------- +// Write-To-Read Causality, WRC, Litmus Test +// --------------------------------------------------------------------------------------------------------- +// Inital State: +// X = 0; Y = 0; +// --------------------------------------------------------------------------------------------------------- +// Thread 0 | Thread 1 | Thread 2 +// --------------------------------------------------------------------------------------------------------- +// STORE(X, 1) | r0 = LOAD(X) | r1 = LOAD(Y) +// | STORE(Y, r0) | r2 = LOAD(X) +// --------------------------------------------------------------------------------------------------------- +// Observed: r0 = 1 && r1 = 1 && r2 = 0 +// --------------------------------------------------------------------------------------------------------- +// +// Let's go over this example in detail and whether the outcome shown above can be observed. In this example Thread 0 stores 1 into X. If Thread 1 observes the write to X, +// it stores the observed value into Y. Thread 2 loads from Y then X. This means if the load from Y retuns 1, then we intuitively know the global store order +// was 1 to X and then 1 to Y. So is it possible then that the load from X in Thread 2 can return 0 in that case? Under a Multi-Copy Store Atomicity system, that would be +// impossible because once 1 was stored to X all cpus see that store so if Thread 2 saw the store to Y which can only happen after the store to X was observed, then +// Thread 2 must also have observed the store to X and return 1. As you may well have figured out, it is possible under a Non-Atomic Store Atomicity system to still +// observe the load from X returning 0 even if the above load from Y returned 1 in Thread 2. This completely breaks our intuition of causality. Let's now understand what hardware may cause this. +// +// This is possible on cpus that have Simultaneous Multi-Threading, SMT or HyperThreading in Intel parlance, which share resources such as store buffers or L1 cache. +// We are accustomed to the x86 way of SMT where each logical core shares Execution Units on the physical core but each logical core has their own statically partitioned +// cache and store buffer that is not visible to the other cpus. It is possible on cpus like ARMv7 or POWER, POWER9 supports 4 and even 8 threads per physical core, so +// to save on die space though yet enable this large number of threads per physical core it is common for these logical cores to all use the same store buffer or L1 cache +// per physical core on these processors. Let's take the above example and rerun it with this knowledge to get the observed behaviour outlined above. +// +// Assume Thread 0, Thread 1, and Thread 2 run on cpu 0, cpu 1, and cpu 2 respectively. Assume that cpu 0 and cpu 1 are two logical cores on the same physical core so this processor +// has an SMT value of 2. Thread 0 will store 1 into X. This store may be in the store buffer or in the L1 cache that cpu 1 also shares with cpu 0, thus cpu 1 has early access to cpu 0's stores. +// Thread 1 loads X which it observed as 1 early and then stores 1 into Y. Thread 2 may see the load from Y returning 1 but now the load from X returning 0 all because cpu 1 got early +// access to cpu 0 store due to sharing a L1 cache or store buffer. 
+//
+// We will come back to how to fix this example with the proper memory barriers for Non-Atomic Store Atomicity systems, but we need to take a detour first.
+//
+// We need to take a deeper dive into memory barriers to understand how to restore Multi-Copy Store Atomicity on a Non-Atomic Store Atomicity system.
+// Let's start with a motivating example. We will be using the POWER architecture throughout, because it encompasses all the possible observable behaviour.
+// ARMv7 technically allows Non-Atomic Store Atomicity behaviour, but no consumer ARMv7 chip actually exhibits it.
+// ARMv8 reworked its model to specifically say it is a Multi-Copy Store Atomicity system.
+// POWER is one of the last few popular consumer architectures where Non-Atomic Store Atomicity behaviour is guaranteed to be observable, thus we will be using it for the following examples.
+//
+// To preface, POWER has two types of memory barriers called lwsync and sync. The following table lists the guarantees provided by TSO, x86, and the lwsync instruction.
+// The table gives a hint as to why our previous LDLD/LDST/STST/STLD definition isn't granular enough to categorize memory barrier instructions.
+//
+// TSO: | POWER lwsync memory barrier:
+// LDLD : YES | LDLD : YES
+// LDST : YES | LDST : YES
+// STST : YES | STST : YES
+// STLD : NO | STLD : NO
+// A cumulative : YES | A cumulative : YES
+// B cumulative : YES | B cumulative : YES
+// IRIW : YES | IRIW : NO
+//
+// The TSO memory model provided by x86 appears to give exactly the same ordering guarantees as POWER would if we added an lwsync memory barrier in between each pair of memory instructions.
+// If we looked only at the 4 permutations of reorderings we would be inclined to assume that
+// TSO has exactly the same ordering as sprinkling lwsync in our code in between every pair of memory instructions. That is not the case, because memory barrier causality and cumulativity differ in subtle ways.
+// In this case they differ in the implicit guarantees from the TSO memory model versus those provided by the POWER lwsync memory barrier.
+// The lwsync memory barrier prevents reordering with instructions that have causality, but does not prevent reordering with instructions that are completely independent.
+// Let's dive into these concepts a bit more.
+//
+// Non-Atomic Store Atomicity architectures are prone to behaviours such as the non-causal outcome of the WRC test above. Architectures such as POWER define memory barriers to enforce
+// ordering with respect to memory accesses in remote cpus other than the cpu actually issuing the memory barrier. This is known as memory barrier cumulativity:
+// how does the memory barrier issued on my cpu affect the view of memory accesses done by remote cpus?
+//
+// Cumulative memory barriers are defined as follows - take your time, this part is very non-trivial:
+// A-Cumulative: We denote group A as the set of memory instructions in this cpu or other cpus that are ordered before the memory barrier in this cpu.
+// A-Cumulativity requires that memory instructions from any cpu that have performed prior to a memory load before the memory barrier on this cpu are also members of group A.
+// B-Cumulative: We denote group B as the set of memory instructions in this cpu or other cpus that are ordered after the memory barrier in this cpu.
+// B-Cumulativity requires that memory instructions from any cpu that perform after a load and including the load in that cpu that returns the value of a store in group B are +// also members of group B. +// IRIW : enforces a global ordering even for memory instructions that have no causality. The memory instructions are completely independent. +// +// --------------------------------------------------------------------------------------------------------- +// WRC Litmus Test +// --------------------------------------------------------------------------------------------------------- +// Thread 0 | Thread 1 | Thread 2 +// --------------------------------------------------------------------------------------------------------- +// {i} : STORE(X, 1) | {ii} : r0 = LOAD(X) | {v} : r1 = LOAD(Y) +// | {iii} : lwsync | +// | {iv} : STORE(Y, r0) | {vi} : r2 = LOAD(X) +// --------------------------------------------------------------------------------------------------------- +// Outcome: r0 = 1 && r1 = 1 && r2 = 1 +// +// Group A of {iii} : {i} && {ii} +// +// Group B of {iii} : {iv} && {v} && {vi} +// --------------------------------------------------------------------------------------------------------- +// +// Using the WRC test again and inserting a POWER lwsync, don't concern yourself with why the memory barrier was inserted at that spot right now, we now see the distinctions of group A and group B. +// It demonstrates the A and B Cumulative nature of the lwsync instruction, {iii}. First group A, initially consists of {ii} and group B initially consists of {iv} from the local cpu that issued the lwsync. +// Since {ii} reads from {i} and assume {i} happens before {ii}, by definition of A-Cumulativity {i} is included in group A. +// Similarly {v} reads from {iv} and assume {iv} happens before {v}, then {v} is included in group B by definition of B-Cumulativity. +// {vi} is also included in group B since it happens after {v} by definition of B-Cumulativity. +// +// WRC litmus test represents a scenario where only a A-Cumulative memory barrier is needed. The lwsync not only provides the needed local LDST memory barrier for the local thread but also ensures +// that any write Thread 1 has read from before the memory barrier is kept in order with any write Thread 1 does after the memory barrier as far as any other thread observes. +// In other words it ensures that any write that has propagated to Thread 1 before the memory barrier is propagated to any other thread before the second store after the memory barrier in Thread 1 +// can propagte to other threads in the system. This is exactly the definition of A-Cumulativity and what we need to ensure that causality is maintained in the WRC Litmus Test example. +// With that lwsync in place it is now impossible to observe r0 = 1 && r1 = 1 && r2 = 0. The lwsync has restored causal ordering. Let's look at an example that requires B-Cumulativity. 
+// +// --------------------------------------------------------------------------------------------------------- +// Example 2 from POWER manual +// --------------------------------------------------------------------------------------------------------- +// Inital State: +// X = 0; Y = 0; Z = 0 +// --------------------------------------------------------------------------------------------------------- +// Thread 0 | Thread 1 | Thread 2 +// --------------------------------------------------------------------------------------------------------- +// STORE(X, 1) | r0 = LOAD(Y) | r1 = LOAD(Z) +// STORE(Y, 1) | STORE(Z, r0) | r2 = LOAD(X) +// --------------------------------------------------------------------------------------------------------- +// Observed: r0 = 1 && r1 = 1 && r2 = 0 +// --------------------------------------------------------------------------------------------------------- +// +// This example is very similar to WRC except that we kinda extended the Message Passing through an additional shared variable instead. +// Think of this as Thread 0 writing some data into X, setting flag Y, Thread 1 waiting for flag Y then writing flag Z, and finally Thread 2 waiting for flag Z before reading the data. +// Take a minute to digest the above example and think about where a memory barrier, lwsync, should be placed. Don't peek at the solution below. +// +// --------------------------------------------------------------------------------------------------------- +// Example 2 from POWER manual +// --------------------------------------------------------------------------------------------------------- +// Thread 0 | Thread 1 | Thread 2 +// --------------------------------------------------------------------------------------------------------- +// STORE(X, 1) | r0 = LOAD(Y) | r1 = LOAD(Z) +// lwsync | | +// STORE(Y, 1) | STORE(Z, r0) | r2 = LOAD(X) +// --------------------------------------------------------------------------------------------------------- +// +// First the lwsync provides the needed local STST memory barrier for the local thread, thus the lwsync here ensures that the store to X propagates to Thread 1 before the store to Y. +// B-Cumulativity applied to all operations after the memory barrier ensure that the store to X is +// kept in order with respect to the store to Z as far as all other threads participating in the dependency chain are concerned. This is the exact definition of B-Cumulativity. +// With this one lwsync the outcome outlined above is impossble to observe. If r0 = 1 && r1 = 1 then r2 must be properly observed to be 1. +// +// We know that lwsync only provides A-Cumulativity and B-Cumulativity. Now we will look at examples that have no causality constraints thus we need to grab heavier memory barriers +// that ensure in short we will say makes a store visible to all processors even those not on the dependency chains. Let's get to the first example. 
+// +// --------------------------------------------------------------------------------------------------------- +// Independent Reads of Independent Writes, IRIW, coined by Doug Lea +// --------------------------------------------------------------------------------------------------------- +// Inital State: +// X = 0; Y = 0; +// --------------------------------------------------------------------------------------------------------- +// Thread 0 | Thread 1 | Thread 2 | Thread 3 +// --------------------------------------------------------------------------------------------------------- +// STORE(X, 1) | r0 = LOAD(X) | STORE(Y, 1) | r2 = LOAD(Y) +// | r1 = LOAD(Y) | | r3 = LOAD(X) +// --------------------------------------------------------------------------------------------------------- +// Observed: r0 = 1 && r1 = 0 && r2 = 1 && r3 = 0 +// --------------------------------------------------------------------------------------------------------- +// +// The IRIW example above clearly shows that writes can be propagated to different cpus in completely different orders. +// Thread 1 sees the store to X but not the store to Y while Thread 3 sees the store to Y but not the store to X, the complete opposite. +// Also to the keen eye you may have noticed this example is a slight modification of the Store Buffer example so try to guess where the memory barriers would go. +// +// --------------------------------------------------------------------------------------------------------- +// Independent Reads of Independent Writes, IRIW, coined by Doug Lea +// --------------------------------------------------------------------------------------------------------- +// Thread 0 | Thread 1 | Thread 2 | Thread 3 +// --------------------------------------------------------------------------------------------------------- +// STORE(X, 1) | r0 = LOAD(X) | STORE(Y, 1) | r2 = LOAD(Y) +// | sync | | sync +// | r1 = LOAD(Y) | | r3 = LOAD(X) +// --------------------------------------------------------------------------------------------------------- +// +// To ensure that the above observation is forbidden we need to add a full sync memory barrier on both the reading threads. Think of sync as restoring sequential consistency. +// The sync memory barrier ensures that any writes that Thread 1 has read from before the memory barrier are fully propagated to all threads before the reads are satisfied after the memory barrier. +// The same can be said for Thread 3. This is why the sync memory barrier is needed because there is no partial causal ordering here or anything that can be considered for our A and B Cumulativity definitions. +// We must ensure that all writes have been propagated to all cpus before proceeding. This gives way to the difference between sync and lwsync with regards to visiblity of writes and cumulativity. +// sync guarantees that all program-order previous stores must have been propagated to all other cpus before the memory instructions after the memory barrier. +// lwsync does not ensure that stores before the memory barrier have actually propagated to any other cpu before memory instructions after the memory barrier, but it will keep stores before and after the +// lwsync in order as far as other cpus are concerned that are within the dependency chain. +// +// Fun fact while ARMv7 claims to be Non-Atomic Store Atomicity no mainstream ARM implementation that I have seen has shown cases of Non-Atomic Store Atomicity. 
+// It's allowed by the ARMv7 memory model and thus you have to program to that. ARMv8 changes this and states that it has Multi-Copy Store Atomicity. +// +// ******** Release-Acquire Semantics ******** +// +// The most useful and common cases where Release-Acquire Semantics are used in every day code is in message passing and mutexes. Let's get onto some examples and the C++ definition of Release-Acquire. +// +// ACQUIRE: +// An Acquire operation is a one-way memory barrier whereby all loads and stores after the acquire operation cannot move up and above the acquire operation. +// Loads and stores before the acquire operation can move down past the acquire operation. An acquire operation should always be paired with a Release operation on the SAME atomic object. +// +// RELEASE: +// A Release operation is a one-way memory barrier whereby all loads and stores before the release operation cannot move down and below the release operation. +// Loads and stores after the release operation can move up and above the release operation. A release operation should always be paired with an Acquire operation on the SAME atomic object. +// +// Release-Acquire pair does not create a full memory barrier but it guarantees that all memory instructions before a Release operation on an atomic object M are visible after an Acquire +// operation on that same atomic object M. Thus these semantics usually are enough to preclude the need for any other memory barriers. +// The synchronization is established only between the threads Releasing and Acquiring the same atomic object M. +// +// --------------------------------------------------- +// Critical Section +// --------------------------------------------------- +// Thread 0 | Thread 1 +// --------------------------------------------------- +// mtx.lock() - Acquire | mtx.lock() - Acquire +// STORE(X, 1) | r0 = LOAD(X) +// mtx.unlock() - Release | mtx.unlock() - Release +// --------------------------------------------------- +// +// A mutex only requires Release-Acquire semantics to protect the critical section. We do not care if operations above the lock leak into the critical section or that operations below the unlock leak into the +// critical section because they are outside the protected region of the lock()/unlock() pair. Release-Acquire semantics does guarantee that everything inside the critical section cannot leak out. +// Thus all accesses of all previous critical sections for the mutex are guaranteed to have completed and be visible when the mutex is handed off to the next party due to the Release-Acquire chaining. +// This also means that mutexes do not provide or restore Multi-Copy Store Atomicity to any memory instructions outside the mutex, like the IRIW example since it does not emit full memory barriers. +// +// ------------------------------------------------------ +// Message Passing +// ------------------------------------------------------ +// Thread 0 | Thread 1 +// ------------------------------------------------------ +// STORE(DATA, 1) | while (!LOAD_ACQUIRE(FLAG)) +// | +// STORE_RELEASE(FLAG, 1) | r0 = LOAD(DATA) +// ------------------------------------------------------ +// +// This a common message passing idiom that also shows the use of Release-Acquire semantics. It should be obvious by the definitions outlined above why this works. +// An Acquire operation attached to a load needs to provide a LDLD and LDST memory barrier according to our definition of acquire. 
This is provided by default on x86 TSO thus no memory barrier is emitted. +// A Release operation attached to a store needs to provde a STST and LDST memory barrier according to our definition of release. This is provided by default on x86 TSO thus no memory barrier is emitted. +// +// A couple of things of note here. One is that by attaching the semantics of a memory model directly to the memory instruction/operation itself we can take advantage of the fact the some processors +// already provide guarantees between memory instructions and thus we do not have to emit memory barriers. Another thing of note is that the memory model is directly attached to the operation, +// so you must do the Release-Acquire pairing on the SAME object which in this case is the FLAG variable. Doing an Acquire or Release on a separate object has no guarantee to observe an Acquire or Release on a different object. +// This better encapsulates the meaning of the code and also allows the processor to potentially do more optimizations since a stand alone memory barrier will order all memory instructions of a given type before and after the barrier. +// Where as the memory ordering attached to the load or store tells the processor that it only has to order memory instructions in relation to that specific load or store with the given memory order. +// +// +// --------------------------------------------------------------------------------------------------------- +// Release Attached to a Store VS. Standalone Fence +// --------------------------------------------------------------------------------------------------------- +// STORE(DATA, 1) | STORE(DATA, 1) +// | ATOMIC_THREAD_FENCE_RELEASE() +// STORE_RELEASE(FLAG, 1) | STORE_RELAXED(FLAG, 1) +// STORE_RELAXED(VAR, 2) | STORE_RELAXED(VAR, 2) +// --------------------------------------------------------------------------------------------------------- +// ARMv8 Assembly +// --------------------------------------------------------------------------------------------------------- +// str 1, DATA | str 1, DATA +// | dmb ish +// stlr 1, FLAG | str 1, FLAG +// str 2, VAR | str 2, VAR +// --------------------------------------------------------------------------------------------------------- +// +// In the above example the release is attached to the FLAG variable, thus synchronization only needs to be guaranteed for that atomic variable. +// It is entirely possible for the VAR relaxed store to be reordered above the release store. +// In the fence version, since the fence is standalone, there is no notion where the release is meant to be attached to thus the fence must prevent all subsequent relaxed stores +// from being reordered above the fence. The fence provides a stronger guarantee whereby now the VAR relaxed store cannot be moved up and above the release operation. +// Also notice the ARMv8 assembly is different, the release fence must use the stronger dmb ish barrier instead of the dedicated release store instruction. +// We dive more into fences provided by eastl::atomic below. +// +// Release-Acquire semantics also have the property that it must chain through multiple dependencies which is where our knowledge from the previous section comes into play. +// Everything on the Release-Acquire dependency chain must be visible to the next hop in the chain. 
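+//
+// Before moving on to the chaining examples below, here is a rough C++ translation of the store-release versus standalone-fence
+// comparison above (std::atomic is used so the snippet stands alone, and the identifiers gDATA, gFLAG and gVAR are illustrative;
+// eastl::atomic exposes the analogous orderings):
+//
+// int gDATA = 0;
+// std::atomic<int> gFLAG{0};
+// std::atomic<int> gVAR{0};
+//
+// void ReleaseAttachedToStore()
+// {
+//     gDATA = 1;
+//     gFLAG.store(1, std::memory_order_release); // the release is attached to the FLAG store only
+//     gVAR.store(2, std::memory_order_relaxed);  // may be reordered above the release store
+// }
+//
+// void StandaloneReleaseFence()
+// {
+//     gDATA = 1;
+//     std::atomic_thread_fence(std::memory_order_release); // not attached to any particular store
+//     gFLAG.store(1, std::memory_order_relaxed);
+//     gVAR.store(2, std::memory_order_relaxed);            // cannot be reordered above the fence
+// }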
+//
+// ---------------------------------------------------------------------------------------------------------
+// Example 2 from POWER manual
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1) | r0 = LOAD_ACQUIRE(Y) | r1 = LOAD_ACQUIRE(Z)
+// STORE_RELEASE(Y, 1) | STORE_RELEASE(Z, r0) | r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+//
+// ---------------------------------------------------------------------------------------------------------
+// Write-To-Read Causality, WRC, Litmus Test
+// ---------------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2
+// ---------------------------------------------------------------------------------------------------------
+// STORE(X, 1) | r0 = LOAD(X) | r1 = LOAD_ACQUIRE(Y)
+// | STORE_RELEASE(Y, r0) | r2 = LOAD(X)
+// ---------------------------------------------------------------------------------------------------------
+//
+// You may recognize both of these examples from the previous section. We replaced the standalone POWER memory barrier instructions with Release-Acquire semantics attached directly to the operations where we want causality preserved.
+// We have transformed those examples to use the eastl::atomic memory model.
+// Take a moment to digest these examples in relation to the definition of Release-Acquire semantics.
+//
+// The acquire side can be satisfied by reading the value from the release store itself or from any later store in the release sequence headed by that release operation. The following example will make this clearer.
+//
+// ------------------------------------------------------
+// Release Sequence Headed
+// ------------------------------------------------------
+// Initial State:
+// DATA = 0; FLAG = 0;
+// ------------------------------------------------------
+// Thread 0 | Thread 1
+// ------------------------------------------------------
+// STORE(DATA, 1) | r0 = LOAD_ACQUIRE(FLAG)
+// |
+// STORE_RELEASE(FLAG, 1) | r1 = LOAD(DATA)
+// STORE_RELAXED(FLAG, 3) |
+// ------------------------------------------------------
+// Observed: r0 = 3 && r1 = 0
+// ------------------------------------------------------
+//
+// In the above example we may read the value 3 from FLAG. That was not the release store itself, but it is a later store headed by that release store, so the acquire still synchronizes with the release
+// and we are guaranteed to observe r1 = 1; the outcome r0 = 3 && r1 = 0 shown above cannot be observed.
+// The stores to FLAG from the STORE_RELEASE up to but not including the next STORE_RELEASE operation make up the release sequence headed by the first release store operation. Any store in that sequence can be used to enforce
+// causality on the load acquire.
+//
+// ******** Consume is currently not useful ********
+//
+// Consume is a weaker form of an acquire barrier and creates the Release-Consume barrier pairing.
+// Consume states that a load operation on an atomic object M cannot allow any loads or stores dependent on the value loaded by the operation to be reordered before the operation.
+// To understand consume we must first understand dependent loads.
+// You might encounter this being called a data dependency or an address dependency in some literature.
+// +// -------------------------------------------------------------- +// Address Dependency +// -------------------------------------------------------------- +// Initial State: +// DATA = 0; PTR = nullptr; +// -------------------------------------------------------------- +// Thread 0 | Thread 1 +// -------------------------------------------------------------- +// STORE(DATA, 1) | r0 = LOAD(PTR) - typeof(r0) = int* +// | +// STORE(PTR, &DATA) | r1 = LOAD(r0) - typeof(r1) = int +// -------------------------------------------------------------- +// +// There is a clear dependency here where we cannot load from *int until we actually read the int* from memory. +// Now it is possible for Thread 1's load from *ptr to be observed before the store to DATA, therefore it can lead to r0 = &DATA && r1 = 0. +// While this is a failure of causality, it is allowed by some cpus such as the DEC Alpha and I believe Blackfin as well. +// Thus a data dependency memory barrier must be inserted between the data dependent loads in Thread 1. Note that this would equate to a nop on any processor other than the DEC Alpha. +// +// This can occur for a variety of hardware reasons. We learned about invalidation queues. It is possible that the invalidation for DATA gets buffered in Thread 1. DEC Alpha allows the Thread 1 +// load from PTR to continue without marking the entries in its invalidation queue. Thus the subsequent load is allowed to return the old cached value of DATA instead of waiting for the +// marked entries in the invalidation queue to be processed. It is a design decision of the processor not to do proper dependency tracking here and instead relying on the programmer to insert memory barriers. +// +// This data dependent ordering guarantee is useful because in places where we were using an Acquire memory barrier we can reduce it to this Consume memory barrier without any hardware barriers actually emitted on every modern processor. +// Let's take the above example, translate it to Acquire and Consume memory barriers and then translate it to the ARMv7 assembly and see the difference. +// +// --------------------------------------------------------------- --------------------------------------------------------------- +// Address Dependency - Release-Acquire Address Dependency - Release-Acquire - ARMv7 Assembly +// --------------------------------------------------------------- --------------------------------------------------------------- +// Thread 0 | Thread 1 Thread 0 | Thread 1 +// --------------------------------------------------------------- --------------------------------------------------------------- +// STORE(DATA, 1) | r0 = LOAD_ACQUIRE(PTR) STORE(DATA, 1) | r0 = LOAD(PTR) +// | dmb ish | dmb ish +// STORE_RELEASE(PTR, &DATA) | r1 = LOAD(r0) STORE(PTR, &DATA) | r1 = LOAD(r0) +// --------------------------------------------------------------- --------------------------------------------------------------- +// +// To get Release-Acquire semantics on ARMv7 we need to emit dmb ish; memory barriers. 
+// +// --------------------------------------------------------------- --------------------------------------------------------------- +// Address Dependency - Release-Consume Address Dependency - Release-Consume - ARMv7 Assembly +// --------------------------------------------------------------- --------------------------------------------------------------- +// Thread 0 | Thread 1 Thread 0 | Thread 1 +// --------------------------------------------------------------- --------------------------------------------------------------- +// STORE(DATA, 1) | r0 = LOAD_CONSUME(PTR) STORE(DATA, 1) | r0 = LOAD(PTR) +// | dmb ish | +// STORE_RELEASE(PTR, &DATA) | r1 = LOAD(r0) STORE(PTR, &DATA) | r1 = LOAD(r0) +// --------------------------------------------------------------- --------------------------------------------------------------- +// +// Data Dependencies can not only be created by read-after-write/RAW on registers, but also by RAW on memory locations too. Let's look at some more elaborate examples. +// +// --------------------------------------------------------------- --------------------------------------------------------------- +// Address Dependency on Registers - Release-Consume - ARMv7 Address Dependency on Memory - Release-Consume - ARMv7 +// --------------------------------------------------------------- --------------------------------------------------------------- +// Thread 0 | Thread 1 Thread 0 | Thread 1 +// --------------------------------------------------------------- --------------------------------------------------------------- +// STORE(DATA, 1) | r0 = LOAD(PTR) STORE(DATA, 1) | r0 = LOAD(PTR) +// | r1 = r0 + 0 | STORE(TEMP, r0) +// dmb ish | r2 = r1 - 0 dmb ish | r1 = LOAD(TEMP) +// STORE(PTR, &DATA) | r3 = LOAD(r2) STORE(PTR, &DATA) | r2 = LOAD(r1) +// --------------------------------------------------------------- --------------------------------------------------------------- +// +// The above shows a more elaborate example of how data dependent dependencies flow through RAW chains either through memory or through registers. +// +// Notice by identify that this is a data dependent operation and asking for a consume ordering, we can completely eliminate the memory barrier on Thread 1 since we know ARMv7 does not reorder data dependent loads. Neat. +// Unfortunately every major compiler upgrades a consume to an acquire ordering, because the consume ordering in the standard has a stronger guarantee and requires the compiler to do complicated dependency tracking. +// Dependency chains in source code must be mapped to dependency chains at the machine instruction level until a std::kill_dependency in the source code. +// +// ---------------------------------------------------------------- +// Non-Address Dependency && Multiple Chains +// ---------------------------------------------------------------- +// Initial State: +// std::atomic FLAG; int DATA[1] = 0; +// ---------------------------------------------------------------- +// Thread 0 | Thread 1 +// ---------------------------------------------------------------- +// STORE(DATA[0], 1) | int f = LOAD_CONSUME(FLAG) +// | int x = f +// | if (x) return Func(x); +// | +// STORE_RELEASE(FLAG, 1) | Func(int y) return DATA[y - y] +// ---------------------------------------------------------------- +// +// This example is really concise but there is a lot going on. Let's digest it. 
+// First, the standard allows consume ordering even on what we will call not-true machine-level dependencies, that is, dependencies unlike a pointer load followed by a load through that pointer as shown in the previous examples.
+// Here the dependency is between two ints, and the dependency chain on Thread 1 is as follows: f -> x -> y -> DATA[y - y]. The standard requires that source code dependencies on the value loaded
+// by the consume operation flow through assignments and even through function calls. Also notice we created a dependency between the indexing of DATA and the value loaded by the consume operation; while y - y computes nothing useful,
+// it still abides by the standard by creating a source-code data-dependent load on the consume operation. You may see this referred to as an artificial data dependency in other texts.
+// If we assume the compiler is able to track all these dependencies, the question is how we enforce these dependencies at the machine instruction level. Let's go back to our pointer-dependent load example.
+//
+// ----------------------------------------------------------------
+// addi r0, pc, offset;
+// ldr r1, 0(r0);
+// ldr r2, 0(r1);
+// ----------------------------------------------------------------
+//
+// The above pseudo-assembly does a pc-relative calculation to find the address of PTR. We then load PTR and continue the dependency chain by loading the int through the loaded pointer.
+// Thus r0 has type int**, which we use to load r1, an int*, which we use to load our final value r2, the int itself.
+// The key observation here is that the load instructions provided by most architectures only allow moving from a base register + offset into a destination register.
+// This trivially captures data-dependent loads through pointers. But how do we capture the data dependency of DATA[y - y]? We would need something like this.
+//
+// ----------------------------------------------------------------
+// sub r1, r0, r0; // Assume r0 holds y from the Consume Operation
+// add r3, r1, r2; // Assume r2 holds the address of DATA[0]
+// ldr r4, 0(r3);
+// ----------------------------------------------------------------
+//
+// We cannot pass two registers as the address operands of the load instruction. Thus, to accomplish this, notice that we had to carry the data dependency through registers to compute the final address from the consume
+// load of y and then load from that computed address. The compiler would have to recognize all these dependencies and enforce that they be maintained in the generated assembly.
+// The compiler must ensure the entire syntactic, source-code, data-dependency chain is enforced in the generated assembly, no matter how long such a chain may be.
+// Because of this and other issues, every major compiler unilaterally promotes consume to an acquire operation across the board. Read Reference [15] for more information.
+// This removes the practical usefulness of consume even for the pointer-dependent case, which is used quite heavily in concurrent read-heavy data structures where updates are published via pointer swaps.
+//
+// ******** read_depends use case - Release-ReadDepends Semantics ********
+//
+// eastl::atomic provides a weaker read_depends operation that encapsulates only the pointer-dependency case above: loading a pointer and then loading a value through the loaded pointer.
+// The read_depends ordering can be used only on loads from an eastl::atomic<T*> type. The pointer returned by the load must and can only be used to further load values. And that is it.
+// If you are unsure, upgrade this load to an acquire operation.
+//
+// MyStruct* ptr = gAtomicPtr.load(read_depends);
+// int a = ptr->a;
+// int b = ptr->b;
+// return a + b;
+//
+// The loads through ptr after the gAtomicPtr load are guaranteed to observe the correct values of a and b. This pairs with a Release operation on the writer side that publishes gAtomicPtr.
+//
+// ******** Relaxed && eastl::atomic guarantees ********
+//
+// We saw various ways in which compiler barriers do not help us and that we need something more granular, to make sure accesses are not mangled by the compiler, if they are to be considered atomic.
+// Ensuring these guarantees, like preventing dead-store elimination or the splitting of stores into smaller sub-stores, is where the C/C++11
+// standard comes into play to define what it means to operate on an atomic object.
+// These basic guarantees are provided via new compiler intrinsics on gcc/clang that give an explicit indication to the compiler,
+// or on msvc by casting the underlying atomic T to a volatile T*, which provides stronger compiler guarantees than the standard requires.
+// Essentially volatile turns off all possible optimizations on accesses to that variable and ensures volatile accesses cannot be
+// reordered across sequence points. Again, we are not using volatile here to guarantee atomicity; we are using it for its intended purpose,
+// to tell the compiler it cannot assume anything about the contents of that variable. Now let's dive into the base guarantees of eastl::atomic.
+//
+// The standard defines the following for all operations on an atomic object M.
+//
+// Write-Write Coherence:
+// If an operation A that modifies an atomic object M (a store) happens before an operation B that modifies M (a store), then A shall be earlier than B in the modification order of M.
+//
+// Read-Read Coherence:
+// If a value computation A on an atomic object M (a load) happens before a value computation B on M (a load), and A takes its value from a side effect X on M (from a previous store to M), then the value
+// computed by B shall either be the value stored by X or the value stored by some later side effect Y on M, where Y follows X in the modification order of M.
+//
+// Read-Write Coherence:
+// If a value computation A on an atomic object M (a load) happens before an operation B that modifies M (a store), then A shall take its value from a side effect X on M, where X precedes B in the modification
+// order of M.
+//
+// Write-Read Coherence:
+// If a side effect X on an atomic object M (a store) happens before a value computation B on M (a load), then the evaluation of B shall take its value from X or from some side effect Y that follows X in the
+// modification order of M.
+//
+// What does all this mean? It is just a pedantic way of saying that the preceding coherence requirements disallow compiler reordering of atomic operations on a single atomic object.
+// This means all operations must be emitted by the compiler. Stores cannot be dead-store eliminated even if they are the only stores.
+// Loads cannot have common subexpression elimination performed on them even if they are the only loads.
+// Loads and stores to the same atomic object cannot be reordered by the compiler.
+// The compiler cannot introduce extra loads or stores to the atomic object.
+// The compiler also cannot reload from an atomic object; it must save the loaded value, for example by spilling it to a stack slot, instead of loading it again.
+// Essentially this provides all the necessary guarantees needed when treating an object as atomic from the compiler's point of view.
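+//
+// To make these guarantees concrete, here is a small sketch contrasting a plain int with an eastl::atomic<int>. The names gPlain, gShared, Publisher and Reader are illustrative only and not part of this header.
+//
+//     #include <EASTL/atomic.h>
+//
+//     int gPlain = 0;
+//     eastl::atomic<int> gShared{ 0 };
+//
+//     void Publisher()
+//     {
+//         gPlain = 1;                                         // The compiler may dead-store eliminate this, keeping only gPlain = 2.
+//         gPlain = 2;
+//
+//         gShared.store(1, eastl::memory_order_relaxed);      // Both atomic stores must be emitted, in this order
+//         gShared.store(2, eastl::memory_order_relaxed);      // (Write-Write Coherence), even with relaxed ordering.
+//     }
+//
+//     int Reader()
+//     {
+//         int a = gShared.load(eastl::memory_order_relaxed);  // Two distinct loads must be emitted; the compiler cannot fold them into one,
+//         int b = gShared.load(eastl::memory_order_relaxed);  // and b can never observe an older value than a (Read-Read Coherence),
+//         return b - a;                                       // so with the monotonically increasing values above, b - a >= 0.
+//     }
+//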
+//
+// ******** Same Address LoadLoad Reordering ********
+//
+// It is expected that same-address operations cannot be and are not reordered with each other. It is expected that operations to the same address have sequential consistency precisely because
+// they are to the same address. If you picture a cpu executing instructions, how could it reorder instructions to the same address and yet keep single-threaded program behaviour the same?
+// Same Address LoadLoad Reordering is one weakening that can be done while keeping observed program behaviour the same for a single-threaded program.
+// More formally, let A and B be two memory instructions to the same address P, where A is program-ordered before B. If A and B are both loads, then they need not be ordered.
+// If B is a store, then the store cannot retire before instruction A completes. If A is a store and B is a load, then B must get its value forwarded from the store buffer or observe a later store
+// from the cache. Thus same-address LDST, STST and STLD cannot be reordered, but same-address LDLD can be reordered.
+// Intel Itanium and SPARC RMO cpus allow and do perform Same Address LoadLoad Reordering.
+// Let's look at an example.
+//
+// ---------------------------
+// Same Address LoadLoad
+// ---------------------------
+// Initial State:
+// x = 0;
+// ---------------------------
+// Thread 0 | Thread 1
+// ---------------------------
+// STORE(x, 1) | r0 = LOAD(x)
+// | r1 = LOAD(x)
+// ---------------------------
+// Observed: r0 = 1 && r1 = 0
+// ---------------------------
+//
+// Notice that in the above example it appears as if the two loads from the same address have been reordered: if the first load observed the new store of 1, then the next load should not observe a value from the past.
+// Many programmers expect same-address sequential consistency, where all accesses to a single address appear to execute in a sequential order.
+// Notice this violates the Read-Read Coherence rule for atomic objects defined by the std and thus provided by eastl::atomic.
+//
+// All operations on eastl::atomic, regardless of the memory ordering of the operation, provide Same Address Sequential Consistency since they must abide by the coherence rules above.
+//
+// ******** eastl::atomic_thread_fence ********
+//
+// eastl::atomic_thread_fence(relaxed) : Provides no ordering guarantees
+// eastl::atomic_thread_fence(acquire) : Prevents all prior loads from being reordered with all later loads and stores, LDLD && LDST memory barrier
+// eastl::atomic_thread_fence(release) : Prevents all prior loads and stores from being reordered with all later stores, STST && LDST memory barrier
+// eastl::atomic_thread_fence(acq_rel) : Union of acquire and release, LDLD && STST && LDST memory barrier
+// eastl::atomic_thread_fence(seq_cst) : Full memory barrier that provides a single total order
+//
+// See Reference [9] and Fence-Fence, Atomic-Fence, Fence-Atomic Synchronization, Atomics Order and Consistency in the C++ std.
+//
+// ******** Atomic && Fence Synchronization ********
+//
+// ---------------------------
+// Fence-Fence Synchronization
+// ---------------------------
+// A release fence A synchronizes-with an acquire fence B if there exist operations X and Y on the same atomic object M, such that fence A is sequenced-before operation X and X modifies M,
+// operation Y is sequenced-before fence B and Y reads the value written by X.
+// In this case all non-atomic and relaxed atomic stores that are sequenced-before fence A will happen-before all non-atomic and relaxed atomic loads after fence B.
+//
+// ----------------------------
+// Atomic-Fence Synchronization
+// ----------------------------
+// An atomic release operation A on atomic object M synchronizes-with an acquire fence B if there exists some atomic operation X on atomic object M, such that X is sequenced-before B and reads
+// the value written by A.
+// In this case all non-atomic and relaxed atomic stores that are sequenced-before atomic release operation A will happen-before all non-atomic and relaxed atomic loads after fence B.
+//
+// ----------------------------
+// Fence-Atomic Synchronization
+// ----------------------------
+// A release fence A synchronizes-with an atomic acquire operation B on an atomic object M if there exists an atomic operation X such that A is sequenced-before X, X modifies M and B reads the
+// value written by X.
+// In this case all non-atomic and relaxed atomic stores that are sequenced-before fence A will happen-before all non-atomic and relaxed atomic loads after atomic acquire operation B.
+//
+// This can be used to add synchronization to a series of several relaxed atomic operations, as in the following trivial example.
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// x = 0;
+// eastl::atomic<int> y = 0;
+// z = 0;
+// eastl::atomic<int> w = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0 | Thread 1
+// ----------------------------------------------------------------------------------------
+// x = 2 | r0 = y.load(memory_order_relaxed);
+// z = 2 | r1 = w.load(memory_order_relaxed);
+// atomic_thread_fence(memory_order_release); | atomic_thread_fence(memory_order_acquire);
+// y.store(1, memory_order_relaxed); | r2 = x
+// w.store(1, memory_order_relaxed); | r3 = z
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 1 && r2 = 0 && r3 = 0
+// ----------------------------------------------------------------------------------------
+//
+// ******** Atomic vs Standalone Fence ********
+//
+// A sequentially consistent fence is stronger than a sequentially consistent operation because it is not tied to a specific atomic object.
+// An atomic fence must provide synchronization with ANY atomic object, whereas the ordering on an atomic operation only has to be provided with respect to that SAME atomic object.
+// Thus operation-based ordering can provide cheaper guarantees on architectures with dependency tracking hardware.
+// Let's look at a concrete example that will make this all clear.
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// eastl::atomic<int> y = 0;
+// eastl::atomic<int> z = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0 | Thread 1
+// ----------------------------------------------------------------------------------------
+// z.store(2, memory_order_relaxed); | r0 = y.load(memory_order_relaxed);
+// atomic_thread_fence(memory_order_seq_cst); | atomic_thread_fence(memory_order_seq_cst);
+// y.store(1, memory_order_relaxed); | r1 = z.load(memory_order_relaxed);
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0
+// ----------------------------------------------------------------------------------------
+//
+// Here the two sequentially consistent fences synchronize-with each other, ensuring that if we observe r0 = 1 then we must also observe r1 = 2.
+// In the above example, if we observe r0 = 1 it is impossible to observe r1 = 0.
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// eastl::atomic<int> x = 0;
+// eastl::atomic<int> y = 0;
+// eastl::atomic<int> z = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0 | Thread 1
+// ----------------------------------------------------------------------------------------
+// z.store(2, memory_order_relaxed); | r0 = y.load(memory_order_relaxed);
+// x.fetch_add(1, memory_order_seq_cst); | x.fetch_add(1, memory_order_seq_cst);
+// y.store(1, memory_order_relaxed); | r1 = z.load(memory_order_relaxed);
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0
+// ----------------------------------------------------------------------------------------
+//
+// Here the two sequentially consistent fetch_add operations on x synchronize-with each other, ensuring that if we observe r0 = 1 then we cannot observe r1 = 0.
+// The thing to note here is that we synchronized on the SAME atomic object, namely the atomic object x.
+// Note that replacing the x.fetch_add() in Thread 1 with a sequentially consistent operation on another atomic object or with a sequentially consistent fence can lead to
+// observing r1 = 0 even if we observe r0 = 1. For example, the following code may fail.
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// eastl::atomic<int> x = 0;
+// eastl::atomic<int> y = 0;
+// eastl::atomic<int> z = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0 | Thread 1
+// ----------------------------------------------------------------------------------------
+// z.store(2, memory_order_relaxed); | r0 = y.load(memory_order_relaxed);
+// | x.fetch_add(1, memory_order_seq_cst);
+// y.fetch_add(1, memory_order_seq_cst); | r1 = z.load(memory_order_relaxed);
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0
+// ----------------------------------------------------------------------------------------
+//
+// ----------------------------------------------------------------------------------------
+// Initial State:
+// eastl::atomic<int> x = 0;
+// eastl::atomic<int> y = 0;
+// eastl::atomic<int> z = 0;
+// ----------------------------------------------------------------------------------------
+// Thread 0 | Thread 1
+// ----------------------------------------------------------------------------------------
+// z.store(2, memory_order_relaxed); | r0 = y.load(memory_order_relaxed);
+// x.fetch_add(1, memory_order_seq_cst); | atomic_thread_fence(memory_order_seq_cst);
+// y.store(1, memory_order_relaxed); | r1 = z.load(memory_order_relaxed);
+// ----------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0
+// ----------------------------------------------------------------------------------------
+//
+// In these examples it is entirely possible that we observe r0 = 1 && r1 = 0 even though we have source-code causality and sequentially consistent operations.
+// Observability is tied to the atomic object on which the operation was performed; in the second example the thread fence doesn't synchronize-with the fetch_add because
+// there is no load above the fence that reads the value written by the fetch_add.
+//
+// ******** Sequential Consistency Semantics ********
+//
+// See section, Order and consistency, in the C++ std and Reference [9].
+//
+// A load with memory_order_seq_cst performs an acquire operation
+// A store with memory_order_seq_cst performs a release operation
+// An RMW with memory_order_seq_cst performs both an acquire and a release operation
+//
+// All memory_order_seq_cst operations participate in a single total order, described below, in which all threads observe all modifications in the same order.
+//
+// Paraphrasing: there is a single total order, S, on all memory_order_seq_cst operations such that each sequentially consistent operation B that loads a value from
+// atomic object M observes either the result of the last sequentially consistent modification A of M that precedes B in S, or some modification of M that isn't memory_order_seq_cst.
+// For atomic modifications A and B of an atomic object M, B occurs after A in the total order of M if:
+// there is a memory_order_seq_cst fence X whereby A is sequenced before X, and X precedes B in S,
+// there is a memory_order_seq_cst fence Y whereby Y is sequenced before B, and A precedes Y in S,
+// or there are memory_order_seq_cst fences X and Y such that A is sequenced before X, Y is sequenced before B, and X precedes Y in S.
+//
+// Let's look at some examples using memory_order_seq_cst.
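+//
+// The tables below revisit the Store-Buffer litmus test, first with standalone seq_cst fences and then with seq_cst operations. As a C++ sketch of the
+// second, operation-based variant against eastl::atomic (the thread functions and variable names are illustrative only and not part of this header):
+//
+//     #include <EASTL/atomic.h>
+//
+//     eastl::atomic<int> x{ 0 };
+//     eastl::atomic<int> y{ 0 };
+//     int r0, r1;
+//
+//     void Thread0()
+//     {
+//         x.store(1, eastl::memory_order_seq_cst);
+//         r0 = y.load(eastl::memory_order_seq_cst);
+//     }
+//
+//     void Thread1()
+//     {
+//         y.store(1, eastl::memory_order_seq_cst);
+//         r1 = x.load(eastl::memory_order_seq_cst);
+//     }
+//
+//     // Every operation above participates in the single total order S, so at least one
+//     // thread must observe the other's store: r0 == 0 && r1 == 0 is impossible.
+//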
+//
+// ------------------------------------------------------------
+// Store-Buffer
+// ------------------------------------------------------------
+// Initial State:
+// x = 0; y = 0;
+// ------------------------------------------------------------
+// Thread 0 | Thread 1
+// ------------------------------------------------------------
+// STORE_RELAXED(x, 1) | STORE_RELAXED(y, 1)
+// ATOMIC_THREAD_FENCE(SEQ_CST) | ATOMIC_THREAD_FENCE(SEQ_CST)
+// r0 = LOAD_RELAXED(y) | r1 = LOAD_RELAXED(x)
+// ------------------------------------------------------------
+// Observed: r0 = 0 && r1 = 0
+// ------------------------------------------------------------
+//
+// ------------------------------------------------------------
+// Store-Buffer
+// ------------------------------------------------------------
+// Initial State:
+// x = 0; y = 0;
+// ------------------------------------------------------------
+// Thread 0 | Thread 1
+// ------------------------------------------------------------
+// STORE_SEQ_CST(x, 1) | STORE_SEQ_CST(y, 1)
+// r0 = LOAD_SEQ_CST(y) | r1 = LOAD_SEQ_CST(x)
+// ------------------------------------------------------------
+// Observed: r0 = 0 && r1 = 0
+// ------------------------------------------------------------
+//
+// Both solutions above correctly ensure that r0 and r1 cannot both end up as 0. Notice that the second one requires memory_order_seq_cst on both the stores and the
+// loads to ensure they are all in the total order, S, of memory_order_seq_cst operations. The first example instead relies on the stronger guarantee provided by a sequentially consistent fence.
+//
+// ------------------------------------------------------------------------------------------------
+// Read-To-Write Causality
+// ------------------------------------------------------------------------------------------------
+// Initial State:
+// x = 0; y = 0;
+// ------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2
+// ------------------------------------------------------------------------------------------------
+// STORE_SEQ_CST(x, 1) | r0 = LOAD_RELAXED(x) | STORE_RELAXED(y, 1)
+// | ATOMIC_THREAD_FENCE(SEQ_CST) | ATOMIC_THREAD_FENCE(SEQ_CST)
+// | r1 = LOAD_RELAXED(y) | r2 = LOAD_RELAXED(x)
+// ------------------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0 && r2 = 0
+// ------------------------------------------------------------------------------------------------
+//
+// You'll notice this example sits in between the Store-Buffer and IRIW examples we have seen earlier. The store in Thread 0 needs to be sequentially consistent so that it synchronizes with the
+// thread fence in Thread 1. In C++20, due to Reference [9], the strength of sequentially consistent fences has been increased to allow for the following.
+//
+// ------------------------------------------------------------------------------------------------
+// Read-To-Write Causality - C++20
+// ------------------------------------------------------------------------------------------------
+// Initial State:
+// x = 0; y = 0;
+// ------------------------------------------------------------------------------------------------
+// Thread 0 | Thread 1 | Thread 2
+// ------------------------------------------------------------------------------------------------
+// STORE_RELAXED(x, 1) | r0 = LOAD_RELAXED(x) | STORE_RELAXED(y, 1)
+// | ATOMIC_THREAD_FENCE(SEQ_CST) | ATOMIC_THREAD_FENCE(SEQ_CST)
+// | r1 = LOAD_RELAXED(y) | r2 = LOAD_RELAXED(x)
+// ------------------------------------------------------------------------------------------------
+// Observed: r0 = 1 && r1 = 0 && r2 = 0
+// ------------------------------------------------------------------------------------------------
+//
+// Notice we were able to turn the store in Thread 0 into a relaxed store and still properly observe either r1 or r2 returning 1.
+// Note that all implementations of the C++11 standard on every architecture already provide the C++20 behaviour.
+// The C++20 memory model was updated to recognize that all current implementations already implement the fences this strongly.
+//
+// ******** False Sharing ********
+//
+// As we know, operations work at the granularity of a cache line. An RMW operation obviously must have some help from the cache to ensure the entire operation
+// is seen as one whole unit. Conceptually we can think of this as the cpu's cache taking a lock on the cacheline, the cpu doing the read-modify-write operation on the
+// locked cacheline, and then releasing the lock on the cacheline. This means during that time any other cpu needing that cacheline must wait for the lock to be released.
+//
+// If we have two atomic objects doing RMW operations and they are within the same cacheline, they are unintentionally contending and serializing with each other even
+// though they are two completely separate objects. This phenomenon is commonly called false sharing.
+// You can cacheline-align your structure or the eastl::atomic object to prevent false sharing.
+//
+// ******** union of eastl::atomic ********
+//
+// union { eastl::atomic<uint8_t> atomic8; eastl::atomic<uint32_t> atomic32; };
+//
+// We know that operations operate at the granularity of a processor's cacheline, so we may expect that storing to and loading from different-width atomic variables
+// at the same address would not cause weird observable behaviour, but it may.
+// Store buffers allow smaller stores to replace parts of larger loads that are forwarded from the store buffer.
+// This means that if there are 2 bytes of modified data in the store buffer that overlap with a 4-byte load, those 2 bytes will be forwarded
+// from the store buffer. This is even documented behaviour of the x86 store buffer in the x86 architecture manual.
+// This behaviour can cause a processor to observe values that have never been and will never be visible on the bus to other processors.
+// The use of a union with eastl::atomic is not wrong, but your code must be able to withstand these effects.
+//
+// Assume everything starts out initially as zero.
+// +// ------------------------------------------------------------------------------------------------------- +// Thread 0 | Thread 1 | Thread 2 +// -------------------------------------------------------------------------------------------------------- +// cmpxchg 0 -> 0x11111111 | cmpxchg 0x11111111 -> 0x22222222 | mov byte 0x33; mov 4 bytes into register; +// --------------------------------------------------------------------------------------------------------- +// +// After all operations complete, the value in memory at that location is, 0x22222233. +// It is possible that the 4 byte load in thread 2 actually returns 0x11111133. +// Now 0x11111133 is an observed value that no other cpu could observe because it was never globally visible on the data bus. +// +// If the value in memory is 0x22222233 then the first cmpxchg succeeded, then the second cmpxchg succeeded and finally our +// byte to memory was stored, yet our load returned 0x11111133. This is because store buffer contents can be forwarded to overlapping loads. +// It is possible that the byte store got put in the store buffer. Our load happened after the first cmpxchg with the byte forwarded. +// This behaviour is fine as along as your algorithm is able to cope with this kind of store buffer forwarding effects. +// +// Reference [13] is a great read on more about this topic of mixed-size concurrency. +// + + +///////////////////////////////////////////////////////////////////////////////// + + +#include +#include +#include +#include + + +#endif /* EASTL_ATOMIC_H */ diff --git a/include/EASTL/bitset.h b/include/EASTL/bitset.h index f20feb60..d9261050 100644 --- a/include/EASTL/bitset.h +++ b/include/EASTL/bitset.h @@ -27,30 +27,23 @@ #include #include -#ifdef _MSC_VER - #pragma warning(push, 0) -#endif +EA_DISABLE_ALL_VC_WARNINGS(); + #include #include -#ifdef _MSC_VER - #pragma warning(pop) -#endif + +EA_RESTORE_ALL_VC_WARNINGS(); #if EASTL_EXCEPTIONS_ENABLED - #ifdef _MSC_VER - #pragma warning(push, 0) - #endif + EA_DISABLE_ALL_VC_WARNINGS(); + #include // std::out_of_range, std::length_error. - #ifdef _MSC_VER - #pragma warning(pop) - #endif -#endif -#if defined(_MSC_VER) - #pragma warning(push) - #pragma warning(disable: 4127) // Conditional expression is constant + EA_RESTORE_ALL_VC_WARNINGS(); #endif +EA_DISABLE_VC_WARNING(4127); // Conditional expression is constant + #if defined(EA_PRAGMA_ONCE_SUPPORTED) #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. #endif @@ -2234,21 +2227,6 @@ EA_RESTORE_GCC_WARNING() } // namespace eastl -#if defined(_MSC_VER) - #pragma warning(pop) -#endif - +EA_RESTORE_VC_WARNING(); #endif // Header include guard - - - - - - - - - - - - diff --git a/include/EASTL/bitvector.h b/include/EASTL/bitvector.h index ec2bdaeb..ade67823 100644 --- a/include/EASTL/bitvector.h +++ b/include/EASTL/bitvector.h @@ -22,10 +22,7 @@ #include #include -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable: 4480) // nonstandard extension used: specifying underlying type for enum -#endif +EA_DISABLE_VC_WARNING(4480); // nonstandard extension used: specifying underlying type for enum #if defined(EA_PRAGMA_ONCE_SUPPORTED) #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
@@ -1472,21 +1469,6 @@ namespace eastl } // namespace eastl -#ifdef _MSC_VER - #pragma warning(pop) -#endif - +EA_RESTORE_VC_WARNING(); #endif // Header include guard - - - - - - - - - - - - diff --git a/include/EASTL/bonus/adaptors.h b/include/EASTL/bonus/adaptors.h index a7e609ba..7e4cd2a6 100644 --- a/include/EASTL/bonus/adaptors.h +++ b/include/EASTL/bonus/adaptors.h @@ -12,6 +12,7 @@ #include #include +#include #if defined(EA_PRAGMA_ONCE_SUPPORTED) #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. @@ -19,7 +20,7 @@ EA_DISABLE_VC_WARNING(4512 4626) #if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015+ - EA_DISABLE_VC_WARNING(5027) // move assignment operator was implicitly defined as deleted + EA_DISABLE_VC_WARNING(5027) // move assignment operator was implicitly defined as deleted #endif @@ -27,7 +28,7 @@ namespace eastl { /// reverse /// - /// This adaptor allows reverse iteration of a container in ranged base for-loops. + /// This adaptor allows reverse iteration of a container in ranged base for-loops. /// /// for (auto& i : reverse(c)) { ... } /// @@ -35,21 +36,46 @@ namespace eastl struct reverse_wrapper { template - reverse_wrapper(C&& c) : mContainer(eastl::forward(c)) {} + reverse_wrapper(C&& c) + : mContainer(eastl::forward(c)) + { + /** + * NOTE: + * + * Due to reference collapsing rules of universal references Container type is either + * + * const C& if the input is a const lvalue + * C& if the input is a non-const lvalue + * C if the input is an rvalue + * const C if the input is a const rvalue thus the object will have to be copied and the copy-ctor will be called + * + * + * Thus we either move the whole container into this object or take a reference to the lvalue avoiding the copy. + * The static_assert below ensures this. + */ + static_assert(eastl::is_same_v, "Reference collapsed deduced type must be the same as the deduced Container type!"); + } + Container mContainer; }; template auto begin(const reverse_wrapper& w) -> decltype(rbegin(w.mContainer)) - { return rbegin(w.mContainer); } + { + return rbegin(w.mContainer); + } template auto end(const reverse_wrapper& w) -> decltype(rend(w.mContainer)) - { return rend(w.mContainer); } + { + return rend(w.mContainer); + } template reverse_wrapper reverse(Container&& c) - { return reverse_wrapper(eastl::forward(c)); } + { + return reverse_wrapper(eastl::forward(c)); + } } // namespace eastl @@ -59,17 +85,3 @@ namespace eastl EA_RESTORE_VC_WARNING() #endif // Header include guard - - - - - - - - - - - - - - diff --git a/include/EASTL/bonus/sparse_matrix.h b/include/EASTL/bonus/sparse_matrix.h deleted file mode 100644 index dd8ea653..00000000 --- a/include/EASTL/bonus/sparse_matrix.h +++ /dev/null @@ -1,1581 +0,0 @@ -///////////////////////////////////////////////////////////////////////////// -// Copyright (c) Electronic Arts Inc. All rights reserved. -///////////////////////////////////////////////////////////////////////////// - - -/////////////////////////////////////////////////////////////////////////////// -// *** Note *** -// This implementation is incomplete. -// -// Additionally, this current implementation is not yet entirely in line with -// EASTL conventions and thus may appear a little out of place to the observant. -// The goal is to bring thus file up to current standards in a future version. 
-/////////////////////////////////////////////////////////////////////////////// - - -// To do: -// Remove forward declarations of classes. -// Remove mCol variable from matrix_cell. -// Make iterators have const and non-const versions. -// Remove mpCell from sparse_matrix_col_iterator. -// Remove mpRow from sparse_matrix_row_iterator. -// Remove mpMatrix from iterators. - - -/////////////////////////////////////////////////////////////////////////////// -// This file implements a sparse matrix, which is a 2 dimensional array of -// cells of an arbitrary type T. It is useful for situations where you need -// to store data in a very sparse way. The cost of storing an individual cell -// is higher than with a 2D array (or vector of vectors), but if the array is -// sparse, then a sparse matrix can save memory. It can also iterate non-empty -// cells faster than a regular 2D array, as only used cells are stored. -/////////////////////////////////////////////////////////////////////////////// - - -#ifndef EASTL_SPARSE_MATRIX_H -#define EASTL_SPARSE_MATRIX_H - -#if 0 - -#include -#include -#include -#include - -#if defined(EA_PRAGMA_ONCE_SUPPORTED) - #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. -#endif - - - -namespace eastl -{ - - // kRowColIndexNone - // Refers to a row that is non-existant. If you call a function that returns a - // row or col index, and get kSparseMatrixIndexNone, the row or col doesn't exist. - static const int32_t kSparseMatrixIntMin = (-2147483647 - 1); - static const int32_t kSparseMatrixIntMax = 2147483647; - - - /////////////////////////////////////////////////////////////////////////////// - // Forward declarations - // - template struct matrix_cell; - template struct matrix_row; - template class sparse_matrix; - template class sparse_matrix_row_iterator; - template class sparse_matrix_col_iterator; - template class sparse_matrix_iterator; - - - - /////////////////////////////////////////////////////////////////////////////// - /// matrix_cell - /// - template - struct matrix_cell - { - public: - typedef matrix_cell this_type; - typedef T value_type; - - public: - int mCol; - value_type mValue; - - public: - matrix_cell(int nCol = 0); - matrix_cell(int nCol, const value_type& value); - - }; // matrix_cell - - - - /////////////////////////////////////////////////////////////////////////// - /// matrix_row - /// - template - struct matrix_row - { - public: - typedef Allocator allocator_type; - typedef matrix_row this_type; - typedef T value_type; - typedef matrix_cell cell_type; - typedef eastl::map, allocator_type> CellMap; - - public: - int mRow; - CellMap mCellRow; - - public: - matrix_row(int nRow = 0); - - // This function finds the given column in this row, if present. - // The result is a cell, and the pointer to the cell data itself - // is returned in the 'pCell' argument. - bool GetMatrixCol(int nCol, cell_type*& pCell); - - }; // matrix_row - - - - - /////////////////////////////////////////////////////////////////////////////// - /// sparse_matrix_row_iterator - /// - /// Iterates cells in a given row of a sparse matrix. - /// - template - class sparse_matrix_row_iterator - { - public: - typedef sparse_matrix_row_iterator this_type; - typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. 
- typedef ptrdiff_t difference_type; - typedef T value_type; - typedef T& reference; - typedef T* pointer; - typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; - - typedef sparse_matrix MatrixType; - typedef matrix_row row_type; - typedef matrix_cell cell_type; - typedef eastl::map RowMap; - typedef typename row_type::CellMap CellMap; - - public: - MatrixType* mpMatrix; - row_type* mpRow; - typename CellMap::iterator mCellMapIterator; - - public: - sparse_matrix_row_iterator(MatrixType* pMatrix, row_type* pRow, const typename CellMap::iterator& ic) - : mpMatrix(pMatrix), mpRow(pRow), mCellMapIterator(ic) - { - } - - sparse_matrix_row_iterator(MatrixType* pMatrix = NULL) - : mpMatrix(pMatrix), mpRow(NULL) - { - } - - int GetCol() // Returns kSparseMatrixIntMin if iterator is 'empty'. We don't - { // return -1 because sparse matrix is not limited to rows/cols >= 0. - if(mpRow) // You can have a matrix that starts at column -100 and row -500. - { - const cell_type& cell = (*mCellMapIterator).second; - return cell.mCol; - } - return kSparseMatrixIntMin; - } - - int GetRow() - { - if(mpRow) - return mpRow->mRow; - return kSparseMatrixIntMin; - } - - bool operator==(const this_type& x) const - { - if(!mpRow && !x.mpRow) // If we are comparing 'empty' iterators... - return true; - - // The first check below wouldn't be necessary if we had a guarantee the iterators can compare between different rows. - return (mpRow == x.mpRow) && (mCellMapIterator == x.mCellMapIterator); - } - - bool operator!=(const this_type& x) const - { - return !operator==(x); - } - - reference operator*() const - { - const cell_type& cell = (*mCellMapIterator).second; - return cell.mValue; - } - - pointer operator->() const - { - const cell_type& cell = (*mCellMapIterator).second; - return &cell.mValue; - } - - this_type& operator++() - { - ++mCellMapIterator; - return *this; - } - - this_type operator++(int) - { - this_type tempCopy = *this; - ++*this; - return tempCopy; - } - - }; // sparse_matrix_row_iterator - - - - /////////////////////////////////////////////////////////////////////////////// - /// sparse_matrix_col_iterator - /// - /// Iterates cells in a given column of a sparse matrix. Do not modify the - /// sparse_matrix while iterating through it. You can do this with some - /// STL classes, but I'd rather not have to support this kind of code in - /// the future here. - /// - template - class sparse_matrix_col_iterator - { - public: - typedef sparse_matrix_col_iterator this_type; - typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. - typedef ptrdiff_t difference_type; - typedef T value_type; - typedef T& reference; - typedef T* pointer; - typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; - - typedef sparse_matrix MatrixType; - typedef matrix_row row_type; - typedef matrix_cell cell_type; - typedef eastl::map RowMap; - typedef typename row_type::CellMap CellMap; - - public: - MatrixType* mpMatrix; - typename RowMap::iterator mRowMapIterator; - cell_type* mpCell; - - public: - sparse_matrix_col_iterator(MatrixType* pMatrix, const typename RowMap::iterator& i, cell_type* pCell) - : mpMatrix(pMatrix), mRowMapIterator(i), mpCell(pCell) - { - } - - sparse_matrix_col_iterator(MatrixType* pMatrix = NULL) - : mpMatrix(pMatrix), mpCell(NULL) - { - } - - int GetCol() // Returns kSparseMatrixIntMin if iterator is 'empty'. We don't return -1 - { // because sparse matrix is not limited to rows/cols >= 0. 
- if(mpCell) // You can have a matrix that starts at column -100 and row -500. - return mpCell->mCol; - return kSparseMatrixIntMin; - } - - int GetRow() - { - if(mpCell) // This might look strange, but we are using 'pCell' to - return (*mRowMapIterator).second.mRow; // simply tell us if the iterator is 'empty' or not. - return kSparseMatrixIntMin; - } - - bool operator==(const this_type& x) const - { - if(!mpCell && !x.mpCell) // If we are comparing 'empty' iterators... - return true; - - // The second check below wouldn't be necessary if we had a guarantee the iterators can compare between different maps. - return (mRowMapIterator == x.mRowMapIterator) && (mpCell == x.mpCell); - } - - bool operator!=(const this_type& x) const - { - return !operator==(x); - } - - reference operator*() const - { - return mpCell->mValue; - } - - reference operator->() const - { - return &mpCell->mValue; - } - - this_type& operator++() - { - ++mRowMapIterator; - - while(mRowMapIterator != mpMatrix->mRowMap.end()) - { - row_type& row = (*mRowMapIterator).second; - - // Can't we just use row.mCellRow.find(cell)? - typename CellMap::const_iterator it = row.mCellRow.find(mpCell->mCol); - - if(it != row.mCellRow.end()) - { - mpCell = const_cast(&(*it).second); // Trust me, we won't be modifying the data. - return *this; - } - - // Linear search: - //for(typename CellMap::iterator it(row.mCellRow.begin()); it != row.mCellRow.end(); ++it) - //{ - // const cell_type& cell = (*it).second; - // - // if(cell.mCol == mpCell->mCol) - // { - // mpCell = const_cast(&cell); // Trust me, we won't be modifying the data. - // return *this; - // } - //} - - ++mRowMapIterator; - } - - mpCell = NULL; - return *this; - } - - this_type operator++(int) - { - this_type tempCopy = *this; - ++*this; - return tempCopy; - } - - }; // sparse_matrix_col_iterator - - - - /////////////////////////////////////////////////////////////////////////////// - /// sparse_matrix_iterator - /// - /// Iterates cells of a sparse matrix, by rows and columns. Each row is iterated - /// and each column within that row is iterated in order. - /// - template - class sparse_matrix_iterator - { - public: - typedef sparse_matrix_iterator this_type; - typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. 
- typedef ptrdiff_t difference_type; - typedef T value_type; - typedef T& reference; - typedef T* pointer; - typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; - - typedef sparse_matrix MatrixType; - typedef matrix_row row_type; - typedef matrix_cell cell_type; - typedef eastl::map RowMap; - typedef typename row_type::CellMap CellMap; - - public: - MatrixType* mpMatrix; - typename RowMap::iterator mRowMapIterator; - typename CellMap::iterator mCellMapIterator; - - public: - sparse_matrix_iterator(MatrixType* pMatrix, const typename RowMap::iterator& ir, const typename CellMap::iterator& ic) - : mpMatrix(pMatrix), mRowMapIterator(ir), mCellMapIterator(ic) - { - } - - sparse_matrix_iterator(MatrixType* pMatrix, const typename RowMap::iterator& ir) - : mpMatrix(pMatrix), mRowMapIterator(ir), mCellMapIterator() - { - } - - int GetCol() - { - const cell_type& cell = (*mCellMapIterator).second; - return cell.mCol; - } - - int GetRow() - { - const row_type& row = (*mRowMapIterator).second; - return row.mRow; - } - - bool operator==(const this_type& x) const - { - return (mRowMapIterator == x.mRowMapIterator) && (mCellMapIterator == x.mCellMapIterator); - } - - bool operator!=(const this_type& x) const - { - return (mRowMapIterator != x.mRowMapIterator) || (mCellMapIterator != x.mCellMapIterator); - } - - reference operator*() const - { - cell_type& cell = (*mCellMapIterator).second; - return cell.mValue; - } - - this_type& operator++() - { - ++mCellMapIterator; // Increment the current cell (column) in the current row. - - row_type& row = (*mRowMapIterator).second; - - if(mCellMapIterator == row.mCellRow.end()) // If we hit the end of the current row... - { - ++mRowMapIterator; - - while(mRowMapIterator != mpMatrix->mRowMap.end()) // While we haven't hit the end of rows... - { - row_type& row = (*mRowMapIterator).second; - - if(!row.mCellRow.empty()) // If there are any cells (columns) in this row... - { - mCellMapIterator = row.mCellRow.begin(); - break; - } - - ++mRowMapIterator; - } - } - - return *this; - } - - this_type operator++(int) - { - this_type tempCopy = *this; - operator++(); - return tempCopy; - } - - }; // sparse_matrix_iterator - - - - /////////////////////////////////////////////////////////////////////////////// - /// sparse_matrix - /// - template - class sparse_matrix - { - public: - typedef sparse_matrix this_type; - typedef T value_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - typedef ptrdiff_t difference_type; - typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. - typedef sparse_matrix_row_iterator row_iterator; - typedef sparse_matrix_col_iterator col_iterator; - typedef sparse_matrix_iterator iterator; - typedef sparse_matrix_iterator const_iterator; // To do: Fix this. - typedef Allocator allocator_type; - typedef matrix_row row_type; - typedef typename row_type::CellMap CellMap; - typedef eastl::map RowMap; - - // iterator friends - friend class sparse_matrix_row_iterator; - friend class sparse_matrix_col_iterator; - friend class sparse_matrix_iterator; - - // kRowColIndexNone - static const int32_t kRowColIndexNone = kSparseMatrixIntMin; - - // UserCell - // We don't internally use this struct to store data, because that would - // be inefficient. 
However, whenever the user of this class needs to query for - // individual cells, especially in batches, it is useful to have a struct that - // identifies both the cell coordinates and cell data for the user. - struct UserCell - { - int mCol; - int mRow; - T mValue; - }; - - public: - sparse_matrix(); - sparse_matrix(const sparse_matrix& x); - ~sparse_matrix(); - - this_type& operator=(const this_type& x); - - void swap(); - - // Iterators - row_iterator row_begin(int nRow); - row_iterator row_end(int nRow); - col_iterator col_begin(int nCol); - col_iterator col_end(int nCol); - iterator begin(); - iterator end(); - - // Standard interface functions - bool empty() const; // Returns true if no cells are used. - size_type size() const; // Returns total number of non-empty cells. - - int GetMinUsedRow(int& nResultCol) const; // Returns first row that has data. Fills in column that has that data. Returns kRowUnused if no row has data. - int GetMaxUsedRow(int& nResultCol) const; // Returns last row that has data. Fills in column that has that data. Returns kRowUnused if no row has data. - bool GetMinMaxUsedColForRow(int nRow, int& nMinCol, int& nMaxCol) const; // Sets the min and max column and returns true if any found. - bool GetMinMaxUsedRowForCol(int nCol, int& nMinRow, int& nMaxRow) const; // Sets the min and max row and returns true if any found. - size_type GetColCountForRow(int nRow) const; // You specify the row, it gives you the used cell count. - - int GetMinUsedCol(int& nResultRow) const; // Returns first column that has data. Fills in row that has that data. Returns kColUnused if no column has data. - int GetMaxUsedCol(int& nResultRow) const; // Returns last column that has data. Fills in row that has that data. Returns kColUnused if no column has data. - size_type GetRowCountForCol(int nCol) const; // - int GetRowWithMaxColCount(size_type& nColCount) const; // - - bool remove(int nRow, int nCol, T* pPreviousT = NULL); // If you pass in a 'pPreviousT', it will copy in value to it before removing the cell. - bool remove_row(int nRow, size_type nCount = 1); // Removes 'nCount' rows, starting at 'nRow'. - bool remove_col(int nCol, size_type nCount = 1); // Removes 'nCount' cols, starting at 'nCol'. - bool clear(); // Removes all cells. - void insert(int nRow, int nCol, const value_type& t, value_type* pPrevValue = NULL); // If you pass in a 'pPreviousT', it will copy in value to it before changing the cell. - bool IsCellUsed(int nRow, int nCol); // Returns true if cell is non-empty - - bool GetCell(int nRow, int nCol, value_type* pValue = NULL); // - bool GetCellPtr(int nRow, int nCol, value_type** pValue); // Gets a pointer to the cell itself, for direct manipulation. - size_type GetCellCountForRange(int nRowStart, int nRowEnd, - int nColStart, int nColEnd); // Counts cells in range. Range is inclusive. - int GetCellRange(int nRowStart, int nRowEnd, - int nColStart, int nColEnd, UserCell* pCellArray = NULL); // Copies cell data into the array of UserCells provided by the caller. - int FindCell(const value_type& t, UserCell* pCellArray = NULL); // Finds all cells that match the given argument cell. Call this function with NULL pCellArray to simply get the count. - - bool validate(); - int validate_iterator(const_iterator i) const; - - protected: - bool GetMatrixRow(int nRow, row_type*& pRow); - - protected: - RowMap mRowMap; /// Map of all row data. It is a map of maps. - size_type mnSize; /// The count of all cells. This is equal to the sums of the sizes of the maps in mRowMap. 
- allocator_type mAllocator; /// The allocator for all data. - - }; // sparse_matrix - - - - - - - - /////////////////////////////////////////////////////////////////////////////// - // matrix_cell - /////////////////////////////////////////////////////////////////////////////// - - template - matrix_cell::matrix_cell(int nCol = 0) - : mCol(nCol), mValue() - { - } - - template - matrix_cell::matrix_cell(int nCol, const value_type& value) - : mCol(nCol), mValue(value) - { - } - - - - - /////////////////////////////////////////////////////////////////////////////// - // matrix_row - /////////////////////////////////////////////////////////////////////////////// - - template - matrix_row::matrix_row(int nRow = 0) - : mRow(nRow), mCellRow() - { - } - - template - bool matrix_row::GetMatrixCol(int nCol, cell_type*& pCell) - { - #if EASTL_ASSERT_ENABLED - int nPreviousCol(sparse_matrix::kRowColIndexNone); - EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); - #endif - - typename CellMap::iterator it(mCellRow.find(nCol)); - - if(it != mCellRow.end()) - { - cell_type& cell = (*it).second; - pCell = &cell; - return true; - } - - return false; - } - - template - inline bool operator==(const matrix_row& a, const matrix_row& b) - { - return (a.mRow == b.mRow) && (a.mCellRow == b.mCellRow); - } - - template - inline bool operator==(const matrix_cell& a, const matrix_cell& b) - { - return (a.mValue == b.mValue); - } - - - - - /////////////////////////////////////////////////////////////////////////////// - // sparse_matrix - /////////////////////////////////////////////////////////////////////////////// - - template - inline sparse_matrix::sparse_matrix() - : mRowMap(), mnSize(0) - { - } - - - template - inline sparse_matrix::sparse_matrix(const this_type& x) - { - mnSize = x.mnSize; - mRowMap = x.mRowMap; - } - - - template - inline sparse_matrix::~sparse_matrix() - { - // Nothing to do. - } - - - template - inline typename sparse_matrix::this_type& - sparse_matrix::operator=(const this_type& x) - { - // Check for self-asignment is not needed, as the assignments below already do it. - mnSize = x.mnSize; - mRowMap = x.mRowMap; - return *this; - } - - - template - inline void sparse_matrix& sparse_matrix::swap() - { - eastl::swap(mnSize, x.mnSize); - eastl::swap(mRowMap, x.mRowMap); - } - - - template - inline bool sparse_matrix::empty() const - { - return (mnSize == 0); - } - - - template - inline typename sparse_matrix::size_type - sparse_matrix::size() const - { - return mnSize; - } - - - /////////////////////////////////////////////////////////////////////////////// - // row_begin - // - // This function returns a sparse matrix row iterator. It allows you to - // iterate all used cells in a given row. You pass in the row index and it - // returns an iterator for the first used cell. You can dereference the - // iterator to get the cell data. Just like STL containers, the end iterator - // is one-past the past the last valid iterator. A row iterator returned - // by this function is good only for that row; likewise, you can only use - // such a row iterator with the end iterator for that row and not with an - // end iterator for any other row. 
- // - // Here is an example of using a row iterator to iterate all used cells - // in row index 3 of a sparse matrix of 'int': - // sparse_matrix::row_iterator it = intMatrix.row_begin(3); - // sparse_matrix::row_iterator itEnd = intMatrix.row_end(3); - // - // while(it != itEnd) - // { - // printf("Col=%d, row=%d, value=%d\n", it.GetCol(), it.GetRow(), *it); - // ++it; - // } - // - template - typename sparse_matrix::row_iterator - sparse_matrix::row_begin(int nRow) - { - EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); - - row_type* pRow; - - if(GetMatrixRow(nRow, pRow)) - return sparse_matrix_row_iterator(this, pRow, pRow->mCellRow.begin()); - return sparse_matrix_row_iterator(this); //Create an 'empty' iterator. - } - - - /////////////////////////////////////////////////////////////////////////////// - // row_end - // - // Returns the end iterator for a given row. See the row_begin function for more. - // - template - inline typename sparse_matrix::row_iterator - sparse_matrix::row_end(int nRow) - { - EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); - - row_type* pRow; - - if(GetMatrixRow(nRow, pRow)) - return sparse_matrix_row_iterator(this, pRow, pRow->mCellRow.end()); - return sparse_matrix_row_iterator(this); //Create an 'empty' iterator. - } - - - /////////////////////////////////////////////////////////////////////////////// - // col_begin - // - // This function returns a sparse matrix column iterator. A column iterator - // acts just like a row iterator except it iterates cells in a column instead - // of cells in a row. - // - // Here is an example of using a column iterator to iterate all used cells - // in column index 0 (the first column) of a sparse matrix of 'int': - // sparse_matrix::col_iterator it = intMatrix.col_begin(0); - // sparse_matrix::col_iterator itEnd = intMatrix.col_end(0); - // - // while(it != itEnd) - // { - // printf("Col=%d, row=%d, value=%d\n", it.GetCol(), it.GetRow(), *it); - // ++it; - // } - // - template - typename sparse_matrix::col_iterator - sparse_matrix::col_begin(int nCol) - { - EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); - - for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) - { - const row_type& matrixRowConst = (*it).second; - row_type& row = const_cast(matrixRowConst); - - for(typename CellMap::iterator it1(row.mCellRow.begin()); it1!=row.mCellRow.end(); ++it1) - { - const cell_type& cellConst = (*it1).second; - cell_type& cell = const_cast(cellConst); - - if(cell.mCol == nCol) - return sparse_matrix_col_iterator(this, it, &cell); - } - } - return sparse_matrix_col_iterator(this, mRowMap.end(), NULL); - } - - - /////////////////////////////////////////////////////////////////////////////// - // col_end - // - // Returns the end iterator for a given colum. See the col_begin function for more. - // - template - inline typename sparse_matrix::col_iterator - sparse_matrix::col_end(int nCol) - { - EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); - - return sparse_matrix_col_iterator(this, mRowMap.end(), NULL); - } - - - /////////////////////////////////////////////////////////////////////////////// - // begin - // - // This function returns a sparse matrix cell iterator. It iterates all used - // cells in the sparse matrix. The cells are returned in column,row order - // (as opposed to row,column order). 
Thus, all columns for a given row will - // be iterated before moving onto the next row. - // - // Here is an example of using an iterator to iterate all used cells: - // sparse_matrix::iterator it = intMatrix.begin(); - // sparse_matrix::iterator itEnd = intMatrix.end(); - // - // while(it != itEnd) - // { - // printf("Col=%d, row=%d, value=%d\n", it.GetCol(), it.GetRow(), *it); - // ++it; - // } - // - template - typename sparse_matrix::iterator - sparse_matrix::begin() - { - for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) - { - row_type& row = (*it).second; - - if(!row.mCellRow.empty()) - return sparse_matrix_iterator(this, it, row.mCellRow.begin()); - } - return sparse_matrix_iterator(this, mRowMap.end()); - } - - - template - inline typename sparse_matrix::iterator - sparse_matrix::end() - { - return sparse_matrix_iterator(this, mRowMap.end()); - } - - - template - int sparse_matrix::GetMinUsedRow(int& nResultCol) const - { - if(!mRowMap.empty()) - { - const row_type& row = (*mRowMap.begin()).second; // Get the last row. - const cell_type& cell = (*row.mCellRow.begin()).second; // Get the first cell in that row, though it doesn't really matter which one we get. - - nResultCol = cell.mCol; - - return row.mRow; // Return the row of the last item in the map. - } - - nResultCol = kRowColIndexNone; - return kRowColIndexNone; - } - - - template - int sparse_matrix::GetMaxUsedRow(int& nResultCol) const - { - if(!mRowMap.empty()) - { - const row_type& row = (*mRowMap.rbegin()).second; // Get the last row. - const cell_type& cell = (*row.mCellRow.begin()).second; // Get the first cell in that row, though it doesn't really matter which one we get. - - nResultCol = cell.mCol; - - return row.mRow; // Return the row of the last item in the map. - } - - nResultCol = kRowColIndexNone; - return kRowColIndexNone; - } - - - template - bool sparse_matrix::GetMinMaxUsedColForRow(int nRow, int& nMinCol, int& nMaxCol) const - { - bool bReturnValue(false); - - EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); - - nMinCol = kSparseMatrixIntMax; - nMaxCol = kSparseMatrixIntMin; - - typename RowMap::iterator it(mRowMap.find(nRow)); - - if(it != mRowMap.end()) - { - const row_type& row = (*it).second; - EASTL_ASSERT(!row.mCellRow.empty()); // All rows should have at least one col, or we would have removed it. - - const cell_type& matrixCellFront = (*row.mCellRow.begin()).second; - const cell_type& matrixCellBack = (*row.mCellRow.rbegin()).second; - - nMinCol = matrixCellFront.mCol; - nMaxCol = matrixCellBack.mCol; - - bReturnValue = true; - } - - return bReturnValue; - } - - - /////////////////////////////////////////////////////////////////////////////// - // GetMinMaxUsedRowForCol - // - template - bool sparse_matrix::GetMinMaxUsedRowForCol(int nCol, int& nMinRow, int& nMaxRow) const - { - // The implementation of this function is a little tougher than with the "col for row" version of - // this function, since the data is stored in row maps instead of column maps. - bool bReturnValue(false); - - EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); - - nMinRow = kSparseMatrixIntMax; - nMaxRow = kSparseMatrixIntMin; - - //First search for the min row. - for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) - { - row_type& row = (*it).second; - EASTL_ASSERT(!row.mCellRow.empty()); // All rows should have at least one col, or we would have removed the row. 
- - // Find the given column in this row. If present work on it. - typename CellMap::iterator it1(row.mCellRow.find(nCol)); - - if(it1 != row.mCellRow.end()) - { - nMinRow = row.mRow; - nMaxRow = row.mRow; - bReturnValue = true; - break; - } - } - - // Now search for a max row. - if(bReturnValue) // There can only be a max row if there was also a min row. - { - for(typename RowMap::reverse_iterator it(mRowMap.rbegin()); it != mRowMap.rend(); ++it) - { - row_type& row = (*it).second; - EASTL_ASSERT(!row.mCellRow.empty()); // All rows should have at least one col, or we would have removed the row. - - // Find the given column in this row. If present work on it. - typename CellMap::iterator it1(row.mCellRow.find(nCol)); - - if(it1 != row.mCellRow.end()) - { - nMaxRow = row.mRow; - break; - } - } - } - - return bReturnValue; - } - - - /////////////////////////////////////////////////////////////////////////////// - // GetColCountForRow - // - template - typename sparse_matrix::size_type - sparse_matrix::GetColCountForRow(int nRow) const - { - EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); - - row_type* pRow; - - if(GetMatrixRow(nRow, pRow)) - return (size_type)pRow->mCellRow.size(); - return 0; - } - - - /////////////////////////////////////////////////////////////////////////////// - // GetMinUsedCol - // - template - int sparse_matrix::GetMinUsedCol(int& nResultRow) const - { - int nMinCol = kRowColIndexNone; - nResultRow = kRowColIndexNone; - - for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) - { - row_type& row = (*it).second; - EASTL_ASSERT(!row.mCellRow.empty()); // All rows should have at least one col, or we would have removed it. - - const cell_type& cell = (*row.mCellRow.begin()).second; - - if((cell.mCol < nMinCol) || (nMinCol == kRowColIndexNone)) - { - nMinCol = cell.mCol; - nResultRow = row.mRow; - } - } - - return nMinCol; - } - - - /////////////////////////////////////////////////////////////////////////////// - // GetMaxUsedCol - // - template - int sparse_matrix::GetMaxUsedCol(int& nResultRow) const - { - int nMaxCol = kRowColIndexNone; - nResultRow = kRowColIndexNone; - - for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) - { - row_type& row = (*it).second; - EASTL_ASSERT(!row.mCellRow.empty()); // All rows should have at least one col, or we would have removed it. 
- - const cell_type& cell = (*row.mCellRow.rbegin()).second; - - if((cell.mCol > nMaxCol) || (nMaxCol == kRowColIndexNone)) - { - nMaxCol = cell.mCol; - nResultRow = row.mRow; - } - } - - return nMaxCol; - } - - - /////////////////////////////////////////////////////////////////////////////// - // GetRowCountForCol - // - template - typename sparse_matrix::size_type - sparse_matrix::GetRowCountForCol(int nCol) const - { - EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); - - size_type nRowCount = 0; - - for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) - { - row_type& row = (*it).second; - EASTL_ASSERT(!row.mCellRow.empty()); - - //Faster set-based code: - typename CellMap::iterator it1(row.mCellRow.find(nCol)); - if(it1 != row.mCellRow.end()) - nRowCount++; - } - - return nRowCount; - } - - - /////////////////////////////////////////////////////////////////////////////// - // GetRowWithMaxColCount - // - template - int sparse_matrix::GetRowWithMaxColCount(size_type& nColCount) const - { - int nRow = 0; - nColCount = 0; - - for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) - { - const row_type& row = (*it).second; - const size_type nSize(row.mCellRow.size()); - EASTL_ASSERT(nSize != 0); - - if(nSize > (size_type)nColCount) - { - nRow = row.mRow; - nColCount = nSize; - } - } - return nRow; - } - - - /////////////////////////////////////////////////////////////////////////// - // GetCellCountForRange - // - template - typename sparse_matrix::size_type - sparse_matrix::GetCellCountForRange(int nRowStart, int nRowEnd, int nColStart, int nColEnd) const - { - size_type nCellCount(0); - - // Note by Paul P.: This could be made a little faster by doing a search - // for the first row and iterating the container from then on. - for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) - { - row_type& row = (*it).second; - - if(row.mRow < nRowStart) - continue; - - if(row.mRow > nRowEnd) - break; - - for(typename CellMap::iterator it1(row.mCellRow.begin()); it1 != row.mCellRow.end(); ++it1) - { - const cell_type& cell = (*it1).second; - - if(cell.mCol < nColStart) - continue; - - if(cell.mCol > nColEnd) - break; - - nCellCount++; - } - } - - return nCellCount; - } - - - /////////////////////////////////////////////////////////////////////////////// - // GetCellRange - // - template - int sparse_matrix::GetCellRange(int nRowStart, int nRowEnd, - int nColStart, int nColEnd, UserCell* pCellArray) const - { - int nCellCount(0); - - // Note by Paul P.: This could be made a little faster by doing a search - // for the first row and iterating the container from then on. 
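// A sketch of the speed-up the note above suggests, assuming RowMap is an ordered
// eastl::map keyed by the row index (which the surrounding find()/rbegin() usage
// implies): seek directly to the first row >= nRowStart with lower_bound() and stop
// once the key exceeds nRowEnd, instead of scanning from begin().
//
//     typename RowMap::const_iterator itRow = mRowMap.lower_bound(nRowStart);
//
//     for(; (itRow != mRowMap.end()) && ((*itRow).first <= nRowEnd); ++itRow)
//     {
//         const row_type& row = (*itRow).second;
//         // ... filter the row's cells by [nColStart, nColEnd] exactly as the loop below does ...
//     }
//
// The same idea applies to the column range via row.mCellRow.lower_bound(nColStart).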
- - for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) - { - row_type& row = (*it).second; - - if(row.mRow < nRowStart) - continue; - if(row.mRow > nRowEnd) - break; - - for(typename CellMap::iterator it1(row.mCellRow.begin()); it1 != row.mCellRow.end(); ++it1) - { - const cell_type& cell = (*it1).second; - - if(cell.mCol < nColStart) - continue; - - if(cell.mCol > nColEnd) - break; - - if(pCellArray) - { - pCellArray[nCellCount].mCol = cell.mCol; - pCellArray[nCellCount].mRow = row.mRow; - pCellArray[nCellCount].mValue = cell.mValue; - } - - nCellCount++; - } - } - - return nCellCount; - } - - - /////////////////////////////////////////////////////////////////////////////// - // remove - // - template - bool sparse_matrix::remove(int nRow, int nCol, T* pPreviousT) - { - EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); - EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); - - //Faster map-based technique: - typename RowMap::iterator it(mRowMap.find(nRow)); - - if(it != mRowMap.end()) - { - row_type& row = (*it).second; - - typename CellMap::iterator it1(row.mCellRow.find(nCol)); - - if(it1 != row.mCellRow.end()) - { - cell_type& cell = (*it1).second; - - if(pPreviousT) - *pPreviousT = cell.mValue; - row.mCellRow.erase(it1); - mnSize--; - - if(row.mCellRow.empty()) // If the row is now empty and thus has no more columns... - mRowMap.erase(it); // Remove the row from the row map. - return true; - } - } - - return false; - } - - - /////////////////////////////////////////////////////////////////////////////// - // remove_row - // - template - bool sparse_matrix::remove_row(int nRow, size_type nCount) - { - EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); - - // Faster map-based technique: - for(int i(nRow), iEnd(nRow + (int)nCount); i < iEnd; i++) - { - typename RowMap::iterator it(mRowMap.find(i)); - - if(it != mRowMap.end()) // If the row is present... - { - row_type& row = (*it).second; - - mnSize -= row.mCellRow.size(); - mRowMap.erase(it); - } - } - - return true; - } - - - /////////////////////////////////////////////////////////////////////////////// - // remove_col - // - template - bool sparse_matrix::remove_col(int nCol, size_type nCount) - { - EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); - - // Faster map-based version: - for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ) // For each row... - { - row_type& row = (*it).second; - - for(int i(nCol), iEnd(nCol + (int)nCount); i < iEnd; i++) - { - typename CellMap::iterator it1(row.mCellRow.find(i)); - - if(it1 != row.mCellRow.end()) // If the col is present... - { - row.mCellRow.erase(it1); - mnSize--; - } - } - - if(row.mCellRow.empty()) - mRowMap.erase(it++); - else - ++it; - } - - return true; - } - - - template - inline bool sparse_matrix::clear() - { - mRowMap.clear(); // Clear out the map of maps. - mnSize = 0; - return true; - } - - - template - void sparse_matrix::insert(int nRow, int nCol, const T& t, T* pPreviousT) - { - EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); - EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); - - typename RowMap::iterator it(mRowMap.find(nRow)); - - if(it != mRowMap.end()) // If the row is already present... 
- { - row_type& row = (*it).second; - - typename CellMap::iterator it1(row.mCellRow.find(nCol)); - - if(it1 != row.mCellRow.end()) // If the col is already present... - { - cell_type& cell = (*it1).second; - - if(pPreviousT) - *pPreviousT = cell.mValue; - cell.mValue = t; - // Note that we leave 'mnSize' as is. - } - else - { - const typename CellMap::value_type insertionPair(nCol, cell_type(nCol, t)); - row.mCellRow.insert(insertionPair); - mnSize++; - } - } - else // Else the row doesn't exist (and the column in that row doesn't exist either). - { - const typename RowMap::value_type insertionPair(nRow, row_type(nRow)); - - eastl::pair insertionResult = mRowMap.insert(insertionPair); - row_type& row = (*insertionResult.first).second; - - EASTL_ASSERT(row.mRow == nRow); // Make sure we are now on the row we just inserted. - const typename CellMap::value_type insertionPair1(nCol, cell_type(nCol, t)); - row.mCellRow.insert(insertionPair1); // Now add the new cell to the new row. - mnSize++; - } - } - - - template - bool sparse_matrix::IsCellUsed(int nRow, int nCol) - { - EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); - EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); - - typename RowMap::iterator it(mRowMap.find(nRow)); - - if(it != mRowMap.end()) - { - row_type& row = (*it).second; - - typename CellMap::iterator it1(row.mCellRow.find(nCol)); - if(it1 != row.mCellRow.end()) - return true; - } - - return false; - } - - - template - bool sparse_matrix::GetCell(int nRow, int nCol, T* pT) - { - EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); - - row_type* pRow; - cell_type* pCell; - - if(GetMatrixRow(nRow, pRow)) - { - if(pRow->GetMatrixCol(nCol, pCell)) - { - if(pT) - *pT = pCell->mValue; - return true; - } - } - - return false; - } - - - template - bool sparse_matrix::GetCellPtr(int nRow, int nCol, T** pT) - { - EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); - - row_type* pRow; - cell_type* pCell; - - if(GetMatrixRow(nRow, pRow)) - { - if(pRow->GetMatrixCol(nCol, pCell)) - { - if(pT) - *pT = &pCell->mValue; - return true; - } - } - - return false; - } - - - template - bool sparse_matrix::GetMatrixRow(int nRow, row_type*& pRow) - { - EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); - - typename RowMap::iterator it(mRowMap.find(nRow)); - - if(it != mRowMap.end()) - { - row_type& row = (*it).second; - pRow = &row; - return true; - } - - return false; - } - - - /////////////////////////////////////////////////////////////////////////////// - // FindCell - // - // Searches all cells for a match for input data 't'. Writes the cell data into - // the user celldata array. Call with a NULL pCellArray to get the count. - // - // This is a simple search function. Many real-world applications would need a - // slightly more flexible search function or mechanism. 
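// One way to get the "slightly more flexible search" mentioned above is to let the
// caller supply an arbitrary predicate and walk the cell iterators documented
// earlier (GetRow()/GetCol()/operator*). Illustrative sketch only: 'Matrix',
// 'Predicate' and 'find_cells_if' are placeholder names, not part of the original
// interface, and the UserCell is filled member-by-member as FindCell/GetCellRange do.
//
//     template <typename Matrix, typename Predicate, typename OutputIterator>
//     int find_cells_if(Matrix& matrix, Predicate pred, OutputIterator dest)
//     {
//         int nCount = 0;
//
//         for(typename Matrix::iterator it = matrix.begin(), itEnd = matrix.end(); it != itEnd; ++it)
//         {
//             if(pred(*it))
//             {
//                 UserCell userCell;
//                 userCell.mRow   = it.GetRow();
//                 userCell.mCol   = it.GetCol();
//                 userCell.mValue = *it;
//
//                 *dest++ = userCell;
//                 ++nCount;
//             }
//         }
//         return nCount;
//     }
//
// A caller could then gather, say, all negative cells of an integer matrix with a
// lambda predicate and a back_inserter into a vector of UserCell.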
- // - template - int sparse_matrix::FindCell(const T& t, UserCell* pCellArray) - { - int nCount(0); - - for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) - { - row_type& row = (*it).second; - - for(typename CellMap::iterator it1(row.mCellRow.begin()); it1 != row.mCellRow.end(); ++it1) - { - cell_type& cell = (*it1).second; - - if(cell.mValue == t) - { - if(pCellArray) - { - UserCell& cell = pCellArray[nCount]; - - cell.mCol = cell.mCol; - cell.mRow = row.mRow; - cell.mValue = t; - } - nCount++; - } - } - } - - return nCount; - } - - - /////////////////////////////////////////////////////////////////////////////// - // validate - // - template - bool sparse_matrix::validate() - { - int nPreviousCol; - int nPreviousRow = kRowColIndexNone; - size_type nActualTotalCells = 0; - - for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) - { - row_type& row = (*it).second; - - if(row.mCellRow.empty()) - { - // EASTL_TRACE("sparse_matrix::validate(): Error: Empty Cell Row %d.\n", row.mRow); - return false; - } - - nPreviousCol = kRowColIndexNone; - - for(typename CellMap::iterator it1(row.mCellRow.begin()); it1 != row.mCellRow.end(); ++it1) - { - cell_type& cell = (*it1).second; - - if(cell.mCol <= nPreviousCol) - { - // EASTL_TRACE("sparse_matrix::validate(): Error: Columns out of order in row, col: %d, %d.\n", row.mRow, cell.mCol); - return false; - } - - nPreviousCol = cell.mCol; - nActualTotalCells++; - } - - if(row.mRow <= nPreviousRow) - { - // EASTL_TRACE("sparse_matrix::validate(): Error: Rows out of order at row: %d.\n", row.mRow); - return false; - } - - nPreviousRow = row.mRow; - } - - if(mnSize != nActualTotalCells) - { - // EASTL_TRACE("sparse_matrix::validate(): Error: 'mnSize' != counted cells %d != %d\n", mnSize, nActualTotalCells); - return false; - } - - return true; - } - - - template - int sparse_matrix::validate_iterator(const_iterator i) const - { - // To do: Complete this. The value below is a potential false positive. 
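// One way the to-do above could be completed, mirroring how other EASTL containers
// typically implement validate_iterator(): only report the stronger flags when the
// iterator is actually reachable from begin(). Illustrative sketch (assumes const
// overloads of begin()/end()):
//
//     for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
//     {
//         if(temp == i)
//             return (isf_valid | isf_current | isf_can_dereference);
//     }
//
//     if(i == end())
//         return (isf_valid | isf_current);
//
//     return isf_none;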
- return (isf_valid | isf_current | isf_can_dereference); - } - - - - - /////////////////////////////////////////////////////////////////////////// - // global operators - /////////////////////////////////////////////////////////////////////////// - - template - bool operator==(sparse_matrix& a, sparse_matrix& b) - { - return (a.mRowMap == b.mRowMap); - } - - template - bool operator<(sparse_matrix& a, sparse_matrix& b) - { - return (a.mRowMap < b.mRowMap); - } - - template - bool operator!=(sparse_matrix& a, sparse_matrix& b) - { - return !(a.mRowMap == b.mRowMap); - } - - template - bool operator>(sparse_matrix& a, sparse_matrix& b) - { - return (b.mRowMap < a.mRowMap); - } - - template - bool operator<=(sparse_matrix& a, sparse_matrix& b) - { - return !(b.mRowMap < a.mRowMap); - } - - template - bool operator>=(sparse_matrix& a, sparse_matrix& b) - { - return !(a.mRowMap < b.mRowMap); - } - - template - void swap(sparse_matrix& a, sparse_matrix& b) - { - a.swap(b); - } - - - -} // namespace eastl - -#endif - -#endif // Header include guard - - - - - - - - - - - - - - - diff --git a/include/EASTL/chrono.h b/include/EASTL/chrono.h index 4f8f710a..453ab0f4 100644 --- a/include/EASTL/chrono.h +++ b/include/EASTL/chrono.h @@ -27,22 +27,25 @@ // TODO: move to platform specific cpp or header file #if defined EA_PLATFORM_MICROSOFT - #pragma warning(push, 0) + EA_DISABLE_ALL_VC_WARNINGS() + #ifndef WIN32_LEAN_AND_MEAN - #define WIN32_LEAN_AND_MEAN + #define WIN32_LEAN_AND_MEAN #endif - EA_DISABLE_ALL_VC_WARNINGS() + #undef NOMINMAX #define NOMINMAX + #include + #ifdef min #undef min #endif - #ifdef max + #ifdef max #undef max #endif + EA_RESTORE_ALL_VC_WARNINGS() - #pragma warning(pop) #endif #if defined(EA_PLATFORM_MICROSOFT) && !defined(EA_PLATFORM_MINGW) diff --git a/include/EASTL/deque.h b/include/EASTL/deque.h index 3b570092..0451bca3 100644 --- a/include/EASTL/deque.h +++ b/include/EASTL/deque.h @@ -90,20 +90,21 @@ EA_RESTORE_ALL_VC_WARNINGS() EA_RESTORE_ALL_VC_WARNINGS() #endif -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable: 4267) // 'argument' : conversion from 'size_t' to 'const uint32_t', possible loss of data. This is a bogus warning resulting from a bug in VC++. - #pragma warning(disable: 4345) // Behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized - #pragma warning(disable: 4480) // nonstandard extension used: specifying underlying type for enum - #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc - #pragma warning(disable: 4571) // catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. - #if EASTL_EXCEPTIONS_ENABLED - #pragma warning(disable: 4703) // potentially uninitialized local pointer variable used. VC++ is mistakenly analyzing the possibility of uninitialized variables, though it's not easy for it to do so. - #pragma warning(disable: 4701) // potentially uninitialized local variable used. - #endif + +// 4267 - 'argument' : conversion from 'size_t' to 'const uint32_t', possible loss of data. This is a bogus warning resulting from a bug in VC++. +// 4345 - Behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized +// 4480 - nonstandard extension used: specifying underlying type for enum +// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc +// 4571 - catch(...) 
semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. +EA_DISABLE_VC_WARNING(4267 4345 4480 4530 4571); + +#if EASTL_EXCEPTIONS_ENABLED + // 4703 - potentially uninitialized local pointer variable used. VC++ is mistakenly analyzing the possibility of uninitialized variables, though it's not easy for it to do so. + // 4701 - potentially uninitialized local variable used. + EA_DISABLE_VC_WARNING(4703 4701) #endif - + #if defined(EA_PRAGMA_ONCE_SUPPORTED) #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. #endif @@ -2677,16 +2678,10 @@ namespace eastl } // namespace eastl -#ifdef _MSC_VER - #pragma warning(pop) +EA_RESTORE_VC_WARNING(); +#if EASTL_EXCEPTIONS_ENABLED + EA_RESTORE_VC_WARNING(); #endif #endif // Header include guard - - - - - - - diff --git a/include/EASTL/fixed_allocator.h b/include/EASTL/fixed_allocator.h index e6a12ea6..488eae4a 100644 --- a/include/EASTL/fixed_allocator.h +++ b/include/EASTL/fixed_allocator.h @@ -20,18 +20,13 @@ #include #include -#ifdef _MSC_VER - #pragma warning(push, 0) - #include - #pragma warning(pop) -#else - #include -#endif +EA_DISABLE_ALL_VC_WARNINGS(); -#if defined(_MSC_VER) - #pragma warning(push) - #pragma warning(disable: 4275) // non dll-interface class used as base for DLL-interface classkey 'identifier' -#endif +#include + +EA_RESTORE_ALL_VC_WARNINGS(); + +EA_DISABLE_VC_WARNING(4275); // non dll-interface class used as base for DLL-interface classkey 'identifier' #if defined(EA_PRAGMA_ONCE_SUPPORTED) #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. @@ -455,12 +450,6 @@ namespace eastl } // namespace eastl -#if defined(_MSC_VER) - #pragma warning(pop) -#endif - +EA_RESTORE_VC_WARNING(); #endif // Header include guard - - - diff --git a/include/EASTL/functional.h b/include/EASTL/functional.h index 03c26978..556bf020 100644 --- a/include/EASTL/functional.h +++ b/include/EASTL/functional.h @@ -8,7 +8,6 @@ #include #include -#include #include #include #include @@ -1011,14 +1010,12 @@ namespace eastl // utility to disable the generic template specialization that is // used for enum types only. template - struct EnableHashIf - { - }; + struct EnableHashIf {}; template struct EnableHashIf { - size_t operator()(const T& p) const { return size_t(p); } + size_t operator()(T p) const { return size_t(p); } }; } // namespace Internal @@ -1026,10 +1023,7 @@ namespace eastl template struct hash; template - struct hash : Internal::EnableHashIf> - { - size_t operator()(T p) const { return size_t(p); } - }; + struct hash : Internal::EnableHashIf> {}; template struct hash // Note that we use the pointer as-is and don't divide by sizeof(T*). This is because the table is of a prime size and this division doesn't benefit distribution. 
{ size_t operator()(T* p) const { return size_t(uintptr_t(p)); } }; @@ -1046,14 +1040,19 @@ namespace eastl template <> struct hash { size_t operator()(unsigned char val) const { return static_cast(val); } }; + #if defined(EA_CHAR8_UNIQUE) && EA_CHAR8_UNIQUE + template <> struct hash + { size_t operator()(char8_t val) const { return static_cast(val); } }; + #endif + #if defined(EA_CHAR16_NATIVE) && EA_CHAR16_NATIVE template <> struct hash - { size_t operator()(char16_t val) const { return static_cast(val); } }; + { size_t operator()(char16_t val) const { return static_cast(val); } }; #endif #if defined(EA_CHAR32_NATIVE) && EA_CHAR32_NATIVE template <> struct hash - { size_t operator()(char32_t val) const { return static_cast(val); } }; + { size_t operator()(char32_t val) const { return static_cast(val); } }; #endif // If wchar_t is a native type instead of simply a define to an existing type... @@ -1095,6 +1094,11 @@ namespace eastl template <> struct hash { size_t operator()(long double val) const { return static_cast(val); } }; + #if defined(EA_HAVE_INT128) && EA_HAVE_INT128 + template <> struct hash + { size_t operator()(uint128_t val) const { return static_cast(val); } }; + #endif + /////////////////////////////////////////////////////////////////////////// // string hashes diff --git a/include/EASTL/internal/allocator_traits.h b/include/EASTL/internal/allocator_traits.h deleted file mode 100644 index 87985f0e..00000000 --- a/include/EASTL/internal/allocator_traits.h +++ /dev/null @@ -1,347 +0,0 @@ -///////////////////////////////////////////////////////////////////////////// -// Copyright (c) Electronic Arts Inc. All rights reserved. -///////////////////////////////////////////////////////////////////////////// - -//////////////////////////////////////////////////////////////////////////////// -// The code in this file is a modification of the libcxx implementation. We copy -// the license information here as required. -//////////////////////////////////////////////////////////////////////////////// -//===------------------------ functional ----------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.TXT for details. 
-// -//===----------------------------------------------------------------------===// - -#include -#include - -namespace eastl -{ - namespace Internal - { - // has_value_type - template - struct has_value_type - { - private: - template static eastl::no_type test(...); - template static eastl::yes_type test(typename U::value_type* = 0); - public: - static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); - }; - - template ::value> - struct value_type - { - typedef typename Alloc::value_type type; - }; - - template - struct value_type - { - typedef char type; - }; - - - // has_pointer_type - namespace has_pointer_type_imp - { - template static eastl::no_type test(...); - template static eastl::yes_type test(typename U::pointer* = 0); - } - - template - struct has_pointer_type - : public integral_constant(0)) == sizeof(eastl::yes_type)> - { - }; - - namespace PointerTypeInternal - { - template ::value> - struct pointer_type - { - typedef typename D::pointer type; - }; - - template - struct pointer_type - { - typedef T* type; - }; - } - - template - struct pointer_type - { - typedef typename PointerTypeInternal::pointer_type::type>::type type; - }; - - - // has_const_pointer - template - struct has_const_pointer - { - private: - template static eastl::no_type test(...); - template static eastl::yes_type test(typename U::const_pointer* = 0); - public: - static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); - }; - - template ::value> - struct const_pointer - { - typedef typename Alloc::const_pointer type; - }; - - template - struct const_pointer - { - #ifndef EA_COMPILER_NO_TEMPLATE_ALIASES - typedef typename pointer_traits::template rebind type; - #else - typedef typename pointer_traits::template rebind::other type; - #endif - }; - - - // has_void_pointer - template - struct has_void_pointer - { - private: - template static eastl::no_type test(...); - template static eastl::yes_type test(typename U::void_pointer* = 0); - public: - static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); - }; - - template ::value> - struct void_pointer - { - typedef typename Alloc::void_pointer type; - }; - - template - struct void_pointer - { - #ifndef EA_COMPILER_NO_TEMPLATE_ALIASES - typedef typename pointer_traits::template rebind type; - #else - typedef typename pointer_traits::template rebind::other type; - #endif - }; - - - // has_const_void_pointer - template - struct has_const_void_pointer - { - private: - template static eastl::no_type test(...); - template static eastl::yes_type test(typename U::const_void_pointer* = 0); - public: - static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); - }; - - template ::value> - struct const_void_pointer - { - typedef typename Alloc::const_void_pointer type; - }; - - template - struct const_void_pointer - { - #ifndef EA_COMPILER_NO_TEMPLATE_ALIASES - typedef typename pointer_traits::template rebind type; - #else - typedef typename pointer_traits::template rebind::other type; - #endif - }; - - - // alloc_traits_difference_type - template ::value> - struct alloc_traits_difference_type - { - typedef typename pointer_traits::difference_type type; - }; - - template - struct alloc_traits_difference_type - { - typedef typename Alloc::difference_type type; - }; - - - // has_size_type - template - struct has_size_type - { - private: - template static eastl::no_type test(...); - template static char test(typename U::size_type* = 0); - public: - static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); - }; 
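// The detection traits above (has_value_type, has_pointer_type, has_size_type and
// the rest) all use the same sizeof-based overload-resolution idiom. A reduced,
// self-contained illustration of that idiom follows; 'yes_probe', 'no_probe' and
// 'has_nested_value_type' are invented names used only for this sketch.

struct yes_probe { char pad[1]; };
struct no_probe  { char pad[2]; };

template <typename T>
struct has_nested_value_type
{
private:
    template <typename U> static no_probe  test(...);                     // chosen when U::value_type is ill-formed
    template <typename U> static yes_probe test(typename U::value_type*); // preferred when U::value_type exists
public:
    static const bool value = sizeof(test<T>(0)) == sizeof(yes_probe);
};

// Example: has_nested_value_type<SomeUserAllocator>::value is true only when
// SomeUserAllocator declares a nested value_type; the traits machinery uses exactly
// this kind of answer to choose between the user's typedef and a default.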
- - template ::value> - struct size_type - { - typedef typename make_unsigned::type type; - }; - - template - struct size_type - { - typedef typename Alloc::size_type type; - }; - - - // has_construct - template - decltype(eastl::declval().construct(eastl::declval(), eastl::declval()...), eastl::true_type()) - has_construct_test(Alloc&& a, T* p, Args&&... args); - - template - eastl::false_type has_construct_test(const Alloc& a, Pointer&& p, Args&&... args); - - template - struct has_construct - : public eastl::integral_constant< bool, - eastl::is_same(), eastl::declval(), - eastl::declval()...)), - eastl::true_type>::value> - { - }; - - - // has_destroy - template - auto has_destroy_test(Alloc&& a, Pointer&& p) -> decltype(a.destroy(p), eastl::true_type()); - - template - auto has_destroy_test(const Alloc& a, Pointer&& p) -> eastl::false_type; - - template - struct has_destroy - : public eastl::integral_constant< bool, - is_same(), eastl::declval())), eastl::true_type>::value> - { - }; - - - // has_max_size - template - auto has_max_size_test(Alloc&& a) -> decltype(a.max_size(), eastl::true_type()); - - template - auto has_max_size_test(const volatile Alloc& a) -> eastl::false_type; - - template - struct has_max_size - : public eastl::integral_constant())), eastl::true_type>::value> - { - }; - - } // namespace Internal - - - //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - // allocator_traits - // - // C++11 Standard section 20.7.8 - // This Internal namespace holds the utility functions required for allocator_traits to do compile-time type - // inspection inorder to determine if needs to provide a default implementation or utilize the users allocator - // implementation. - // - // Reference: http://en.cppreference.com/w/cpp/memory/allocator_traits - // - // eastl::allocator_traits supplies a uniform interface to all allocator types. - // - //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - - //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - // eastl::allocator_traits is not a standards conforming implementation. Enough of the standard was implemented to - // make the eastl::function implementation possible. We must revisit this implementation before rolling out its - // usage fully in eastl::containers. - // - // NOTE: We do not recommend users directly code against eastl::allocator_traits until we have completed a full standards comforming implementation. 
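// In practice the compile-time inspection described above means a user allocator
// only has to provide allocate()/deallocate(); construct()/destroy() are optional
// and fall back to placement new and a direct destructor call. A hedged usage
// sketch of the dispatch in this now-removed header (not a recommendation to code
// against it directly; 'CountingAllocator' is an invented example type):
//
//     struct CountingAllocator
//     {
//         size_t allocations = 0;
//
//         void* allocate(size_t n)          { ++allocations; return ::operator new(n); }
//         void  deallocate(void* p, size_t) { ::operator delete(p); }
//     };
//
//     CountingAllocator alloc;
//     typedef eastl::allocator_traits<CountingAllocator> traits;
//
//     traits::pointer p = traits::allocate(alloc, sizeof(int));
//     traits::construct(alloc, (int*)p, 42); // no CountingAllocator::construct -> placement new
//     traits::destroy(alloc, (int*)p);       // no CountingAllocator::destroy   -> ~int(), a no-op
//     traits::deallocate(alloc, p, sizeof(int));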
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - template - struct allocator_traits - { - typedef Alloc allocator_type; - - typedef typename Internal::value_type::type value_type; - typedef typename Internal::pointer_type::type pointer; - typedef typename Internal::const_pointer::type const_pointer; - typedef typename Internal::void_pointer::type void_pointer; - typedef typename Internal::const_void_pointer::type const_void_pointer; - typedef typename Internal::alloc_traits_difference_type::type difference_type; - typedef typename Internal::size_type::type size_type; - - // - // TODO: for full standards compliance implement the following: - // - // typedef typename Internal::propagate_on_container_copy_assignment::type propagate_on_container_copy_assignment; - // typedef typename Internal::propagate_on_container_move_assignment::type propagate_on_container_move_assignment; - // typedef typename Internal::propagate_on_container_swap::type propagate_on_container_swap; - // template using rebind_alloc = Alloc::rebind::other | Alloc; - // template using rebind_traits = allocator_traits>; - // static allocator_type select_on_container_copy_construction(const allocator_type& a); - - static size_type internal_max_size(true_type, const allocator_type& a) { return a.max_size(); } - static size_type internal_max_size(false_type, const allocator_type&) { return (eastl::numeric_limits::max)(); } // additional parenthesis disables the windows max macro from expanding. - static size_type max_size(const allocator_type& a) EA_NOEXCEPT - { - return internal_max_size(Internal::has_max_size(), a); - } - - static pointer allocate(allocator_type& a, size_type n) { return static_cast(a.allocate(n)); } - - static pointer allocate(allocator_type& a, size_type n, const_void_pointer) - { - // return allocate(a, n, hint, Internal::has_allocate_hint()); - return allocate(a, n); - } - - static void deallocate(allocator_type& a, pointer p, size_type n) EA_NOEXCEPT { a.deallocate(p, n); } - - template - static void internal_construct(eastl::true_type, allocator_type& a, T* p, Args&&... args) - { - a.construct(p, eastl::forward(args)...); - } - - template - static void internal_construct(false_type, allocator_type&, T* p, Args&&... args) - { - ::new ((void*)p) T(eastl::forward(args)...); - } - - template - static void construct(allocator_type& a, T* p, Args&&... args) - { - internal_construct(Internal::has_construct(), a, p, eastl::forward(args)...); - } - - template - static void internal_destroy(eastl::true_type, allocator_type& a, T* p) { a.destroy(p); } - - template - static void internal_destroy(eastl::false_type, allocator_type&, T* p) { EA_UNUSED(p); p->~T(); } - - template - static void destroy(allocator_type& a, T* p) - { - internal_destroy(Internal::has_destroy(), a, p); - } - }; -} // namespace eastl diff --git a/include/EASTL/internal/allocator_traits_fwd_decls.h b/include/EASTL/internal/allocator_traits_fwd_decls.h deleted file mode 100644 index d6283cf4..00000000 --- a/include/EASTL/internal/allocator_traits_fwd_decls.h +++ /dev/null @@ -1,40 +0,0 @@ -///////////////////////////////////////////////////////////////////////////// -// Copyright (c) Electronic Arts Inc. All rights reserved. 
-///////////////////////////////////////////////////////////////////////////// - - -#ifndef EASTL_INTERNAL_ALLOCATOR_TRAITS_H -#define EASTL_INTERNAL_ALLOCATOR_TRAITS_H - - -#include -#if defined(EA_PRAGMA_ONCE_SUPPORTED) - #pragma once -#endif - -#include -#include - -namespace eastl -{ - template - struct allocator_traits; - -} // namespace eastl - -#endif // Header include guard - - - - - - - - - - - - - - - diff --git a/include/EASTL/internal/atomic/arch/arch.h b/include/EASTL/internal/atomic/arch/arch.h new file mode 100644 index 00000000..4924a591 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch.h @@ -0,0 +1,65 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// Include the architecture specific implementations +// +#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64) + + #include "x86/arch_x86.h" + +#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64) + + #include "arm/arch_arm.h" + +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +#include "arch_fetch_add.h" +#include "arch_fetch_sub.h" + +#include "arch_fetch_and.h" +#include "arch_fetch_xor.h" +#include "arch_fetch_or.h" + +#include "arch_add_fetch.h" +#include "arch_sub_fetch.h" + +#include "arch_and_fetch.h" +#include "arch_xor_fetch.h" +#include "arch_or_fetch.h" + +#include "arch_exchange.h" + +#include "arch_cmpxchg_weak.h" +#include "arch_cmpxchg_strong.h" + +#include "arch_load.h" +#include "arch_store.h" + +#include "arch_compiler_barrier.h" + +#include "arch_cpu_pause.h" + +#include "arch_memory_barrier.h" + +#include "arch_signal_fence.h" + +#include "arch_thread_fence.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_add_fetch.h b/include/EASTL/internal/atomic/arch/arch_add_fetch.h new file mode 100644 index 00000000..65771f89 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_add_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ADD_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_ADD_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_8) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_8) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_16) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_16) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_32) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64) + #define 
EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ADD_FETCH_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_and_fetch.h b/include/EASTL/internal/atomic/arch/arch_and_fetch.h new file mode 100644 index 00000000..df7ba35d --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_and_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
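// All of the per-operation arch headers in this patch (arch_add_fetch.h above and
// the rest) follow the same shape: the architecture header may or may not define
// EASTL_ARCH_ATOMIC_<OP>_<ORDER>_<N>, and these headers convert "is it defined?"
// into an explicit 0/1 _AVAILABLE flag that can be tested with a plain #if. A
// hedged sketch of the kind of dispatch those flags enable; the MY_* names are
// placeholders for illustration, not EASTL's actual selection logic:
//
//     #if EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE
//
//         // The architecture provides a specialized 32-bit relaxed add-fetch.
//         #define MY_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \
//             EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val)
//
//     #else
//
//         // Otherwise fall back to some other implementation layer.
//         #define MY_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \
//             MY_FALLBACK_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val)
//
//     #endif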
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_AND_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_AND_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_8) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_8) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_16) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_16) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_32) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_32) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64) + #define 
EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128) + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_AND_FETCH_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h b/include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h new file mode 100644 index 00000000..1005dc33 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h @@ -0,0 +1,430 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_STRONG_H +#define EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_STRONG_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16) + #define 
EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 1 +#else + #define 
EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 0 
+#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16(type, ret, ptr, expected, desired) \ + 
EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) + +#define 
EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_STRONG_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h b/include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h new file mode 100644 index 00000000..5ce26386 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h @@ -0,0 +1,430 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_WEAK_H +#define EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_WEAK_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 0 +#endif + +#if 
defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 1 +#else + #define 
EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128) + #define 
EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) + + +#define 
EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64(type, ret, ptr, expected, desired) \ + 
EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) + + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128_AVAILABLE \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE +#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_WEAK_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_compiler_barrier.h b/include/EASTL/internal/atomic/arch/arch_compiler_barrier.h new file mode 100644 index 00000000..0652469b --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_compiler_barrier.h @@ -0,0 +1,19 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
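
The single-memory-order EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_N and EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_N macros above are pure aliases: each forwards to the dual-order variant whose failure order is the strongest order the C++ memory model allows for a failed compare-exchange (release semantics are stripped, so RELEASE folds to RELEASE_RELAXED and ACQ_REL to ACQ_REL_ACQUIRE, while RELAXED, ACQUIRE and SEQ_CST pair with themselves). As a hedged sketch of what one dual-order 32-bit macro and its single-order alias might expand to on a hypothetical backend built on the GCC/Clang __atomic builtins (this is not the EASTL x86 or ARM implementation, and the MY_ARCH_* names are invented for illustration):

    // Hypothetical sketch only; a real arch backend would use inline assembly
    // or platform intrinsics rather than the __atomic builtins.
    #define MY_ARCH_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \
        (ret) = __atomic_compare_exchange_n((ptr), (expected), (desired), /*weak*/ false, \
                                            __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)

    // The single-order alias simply forwards, mirroring the fold-down above:
    // an ACQ_REL success order pairs with an ACQUIRE failure order.
    #define MY_ARCH_CMPXCHG_STRONG_ACQ_REL_32(type, ret, ptr, expected, desired) \
        MY_ARCH_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired)
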
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_COMPILER_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_ARCH_COMPILER_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_ARCH_ATOMIC_COMPILER_BARRIER_AVAILABLE 0 + +#define EASTL_ARCH_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_AVAILABLE 0 + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_COMPILER_BARRIER_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_cpu_pause.h b/include/EASTL/internal/atomic/arch/arch_cpu_pause.h new file mode 100644 index 00000000..e8c2d1d7 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_cpu_pause.h @@ -0,0 +1,25 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_CPU_PAUSE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_CPU_PAUSE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_PAUSE() +// +#if defined(EASTL_ARCH_ATOMIC_CPU_PAUSE) + #define EASTL_ARCH_ATOMIC_CPU_PAUSE_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CPU_PAUSE_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_CPU_PAUSE_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_exchange.h b/include/EASTL/internal/atomic/arch/arch_exchange.h new file mode 100644 index 00000000..76003188 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_exchange.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved.
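
Two small headers sit between the compare-exchange and exchange families. arch_compiler_barrier.h does not probe for an architecture macro at all: it pins EASTL_ARCH_ATOMIC_COMPILER_BARRIER_AVAILABLE and its DATA_DEPENDENCY variant to 0, leaving compiler-only barriers to the compiler backend. arch_cpu_pause.h uses the usual probe, reporting EASTL_ARCH_ATOMIC_CPU_PAUSE_AVAILABLE as 1 only when an architecture defines EASTL_ARCH_ATOMIC_CPU_PAUSE(). As a hedged, x86-only illustration of what such a definition and a typical spin-wait caller could look like (the MY_ARCH_* name is invented; this is not the EASTL backend):

    #include <immintrin.h>   // _mm_pause, the x86 'pause' hint
    #include <atomic>

    // Hypothetical arch-level definition; if something like this existed,
    // the probe above would report the CPU_PAUSE operation as available.
    #define MY_ARCH_ATOMIC_CPU_PAUSE() _mm_pause()

    // Typical caller: relax the core while spinning on a flag.
    inline void SpinUntilSet(const std::atomic<bool>& flag)
    {
        while (!flag.load(std::memory_order_acquire))
            MY_ARCH_ATOMIC_CPU_PAUSE();
    }
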
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_EXCHANGE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_EXCHANGE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_8) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_8) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_16) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_16) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_32) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_32) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 1 +#else + 
#define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128) + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_EXCHANGE_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_fetch_add.h b/include/EASTL/internal/atomic/arch/arch_fetch_add.h new file mode 100644 index 00000000..71907f70 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_fetch_add.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
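
As the signature comment in arch_exchange.h indicates, these macros use a statement-style convention: the previous value is written into a caller-supplied variable (ret) instead of being returned, which lets a backend expand to multi-statement inline assembly. A hedged sketch of a hypothetical 32-bit seq_cst expansion using the GCC/Clang builtin (MY_ARCH_* is an invented name, not the EASTL backend):

    // Hypothetical statement-style expansion.
    #define MY_ARCH_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \
        (ret) = __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)

    // Usage sketch: 'old' receives the value held in 'slot' before the swap.
    //   int old;
    //   MY_ARCH_EXCHANGE_SEQ_CST_32(int, old, &slot, 42);
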
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_ADD_H +#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_ADD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_8) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_8) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_16) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_16) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_32) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64) + #define 
EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_ADD_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_fetch_and.h b/include/EASTL/internal/atomic/arch/arch_fetch_and.h new file mode 100644 index 00000000..f2b39a4c --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_fetch_and.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
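
The _AVAILABLE flags generated in arch_fetch_add.h (and its siblings) exist so that higher layers can, presumably, prefer an architecture-specific implementation where one is defined and otherwise fall back to the compiler backend. A hedged sketch of that selection pattern with invented MY_* names (not EASTL's actual dispatch layer):

    // If the arch backend advertised the operation, forward to it; otherwise
    // fall back to a compiler-builtin implementation.
    #if MY_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE
        #define MY_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \
            MY_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val)
    #else
        #define MY_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \
            (ret) = __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST)
    #endif
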
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_AND_H +#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_AND_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_8) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_8) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_16) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_16) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_32) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_32) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64) + #define 
EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128) + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_AND_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_fetch_or.h b/include/EASTL/internal/atomic/arch/arch_fetch_or.h new file mode 100644 index 00000000..dd6dd0db --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_fetch_or.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
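
arch_fetch_and.h repeats the same probe grid for atomic AND. Like the other fetch_* operations, the convention is that ret receives the value observed before the operation, which enables "clear a flag and learn whether it was set" patterns. A small self-contained sketch of that idiom using the GCC/Clang builtin (illustrative only, not EASTL code):

    #include <cstdint>

    // Atomically clear the bits in 'mask' and report whether any of them
    // were previously set (fetch_and returns the pre-operation value).
    inline bool ClearAndTest(uint32_t* flags, uint32_t mask)
    {
        uint32_t previous = __atomic_fetch_and(flags, ~mask, __ATOMIC_ACQ_REL);
        return (previous & mask) != 0;
    }
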
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_OR_H +#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_OR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_8) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_8) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_16) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_16) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_32) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 1 +#else + 
#define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128) + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_OR_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_fetch_sub.h b/include/EASTL/internal/atomic/arch/arch_fetch_sub.h new file mode 100644 index 00000000..ea63db73 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_fetch_sub.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
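
arch_fetch_or.h is the OR counterpart. Because the pre-operation value is handed back, fetch_or supports a cheap "first one in wins" idiom: set a bit and check whether another thread had already set it. A hedged sketch with the GCC/Clang builtin (illustrative only, not EASTL code):

    #include <cstdint>

    // Returns true only for the caller that actually transitions the bit
    // from 0 to 1; later callers observe the bit already set.
    inline bool TryClaim(uint32_t* state, uint32_t bit)
    {
        uint32_t previous = __atomic_fetch_or(state, bit, __ATOMIC_ACQ_REL);
        return (previous & bit) == 0;
    }
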
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_SUB_H +#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_SUB_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_8) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_8) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_16) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_16) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_32) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_32) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64) + #define 
EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_SUB_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_fetch_xor.h b/include/EASTL/internal/atomic/arch/arch_fetch_xor.h new file mode 100644 index 00000000..b41ad2d4 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_fetch_xor.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
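
arch_fetch_sub.h mirrors arch_fetch_add.h; on two's-complement hardware a backend may even express it as a fetch_add of the negated operand. The classic use of the returned pre-operation value is reference-count release, sketched here with the GCC/Clang builtin (illustrative only, not EASTL code):

    #include <cstdint>

    // Returns true when this call dropped the last reference; the acq_rel
    // ordering makes the subsequent teardown safe to perform.
    inline bool ReleaseRef(uint32_t* refCount)
    {
        return __atomic_fetch_sub(refCount, 1u, __ATOMIC_ACQ_REL) == 1u;
    }
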
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_XOR_H +#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_XOR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_8) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_8) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_16) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_16) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_32) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_32) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64) + #define 
EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_XOR_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_load.h b/include/EASTL/internal/atomic/arch/arch_load.h new file mode 100644 index 00000000..eea7cf49 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_load.h @@ -0,0 +1,125 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_LOAD_H +#define EASTL_ATOMIC_INTERNAL_ARCH_LOAD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_LOAD_*_N(type, type ret, type * ptr) +// +#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_8) + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_16) + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_32) + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_32) + #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_64) + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64) + #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_128) + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_LOAD_H */ diff --git 
a/include/EASTL/internal/atomic/arch/arch_memory_barrier.h b/include/EASTL/internal/atomic/arch/arch_memory_barrier.h new file mode 100644 index 00000000..c6cc6bfc --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_memory_barrier.h @@ -0,0 +1,47 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_MEMORY_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_ARCH_MEMORY_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_MB() +// +#if defined(EASTL_ARCH_ATOMIC_CPU_MB) + #define EASTL_ARCH_ATOMIC_CPU_MB_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CPU_MB_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_WMB() +// +#if defined(EASTL_ARCH_ATOMIC_CPU_WMB) + #define EASTL_ARCH_ATOMIC_CPU_WMB_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CPU_WMB_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_RMB() +// +#if defined(EASTL_ARCH_ATOMIC_CPU_RMB) + #define EASTL_ARCH_ATOMIC_CPU_RMB_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_CPU_RMB_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_MEMORY_BARRIER_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_or_fetch.h b/include/EASTL/internal/atomic/arch/arch_or_fetch.h new file mode 100644 index 00000000..110326b4 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_or_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
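These arch headers all follow the same pattern: a per-operation, per-memory-order, per-width *_AVAILABLE flag that is 1 only when the architecture layer defines the corresponding macro. A plausible sketch of how such flags might be consumed further up the stack is shown below; the dispatch macro and the EASTL_COMPILER_ATOMIC_CPU_MB fallback named here are illustrative assumptions, the real selection logic lives in the atomic_macros and compiler headers added elsewhere in this patch.

/* Hypothetical dispatch sketch (illustration only, not part of the patch). */
#if EASTL_ARCH_ATOMIC_CPU_MB_AVAILABLE
	/* Prefer the architecture-provided full memory barrier when the arch layer defines one. */
	#define EASTL_ATOMIC_CPU_MB() EASTL_ARCH_ATOMIC_CPU_MB()
#else
	/* Otherwise fall back to the compiler layer (builtins/intrinsics). */
	#define EASTL_ATOMIC_CPU_MB() EASTL_COMPILER_ATOMIC_CPU_MB()
#endif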
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_OR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_OR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_8) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_8) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_16) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_16) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_32) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_32) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + 
#define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128) + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_OR_FETCH_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_signal_fence.h b/include/EASTL/internal/atomic/arch/arch_signal_fence.h new file mode 100644 index 00000000..65b64fc2 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_signal_fence.h @@ -0,0 +1,21 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_SIGNAL_FENCE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_SIGNAL_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_RELAXED_AVAILABLE 0 +#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_ACQUIRE_AVAILABLE 0 +#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_RELEASE_AVAILABLE 0 +#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_ACQ_REL_AVAILABLE 0 +#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_SEQ_CST_AVAILABLE 0 + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_SIGNAL_FENCE_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_store.h b/include/EASTL/internal/atomic/arch/arch_store.h new file mode 100644 index 00000000..9a4112cb --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_store.h @@ -0,0 +1,113 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_STORE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_STORE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_STORE_*_N(type, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_8) + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_8) + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_16) + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_16) + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_32) + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_32) + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_64) + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_64) + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_128) + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_128) + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_STORE_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_sub_fetch.h b/include/EASTL/internal/atomic/arch/arch_sub_fetch.h new file mode 100644 index 00000000..20241b14 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_sub_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. 
All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_SUB_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_SUB_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_8) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_8) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_16) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_16) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_32) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_32) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if 
defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128) + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_SUB_FETCH_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_thread_fence.h b/include/EASTL/internal/atomic/arch/arch_thread_fence.h new file mode 100644 index 00000000..676fbf19 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_thread_fence.h @@ -0,0 +1,49 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_THREAD_FENCE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_THREAD_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_THREAD_FENCE_*() +// +#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED) + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE) + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE) + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL) + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST) + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_THREAD_FENCE_H */ diff --git a/include/EASTL/internal/atomic/arch/arch_xor_fetch.h b/include/EASTL/internal/atomic/arch/arch_xor_fetch.h new file mode 100644 index 00000000..63548c22 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arch_xor_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_XOR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_XOR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_8) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_8) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_8) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_8) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_8) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_16) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_16) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_16) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_16) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_16) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_32) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_32) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_32) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_32) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_32) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64) + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64) + #define 
EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128)
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+	#define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_XOR_FETCH_H */
diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm.h b/include/EASTL/internal/atomic/arch/arm/arch_arm.h
new file mode 100644
index 00000000..0349a42f
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arm/arch_arm.h
@@ -0,0 +1,89 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+/**
+ * NOTE: We use this mapping
+ *
+ * ARMv7 Mapping 'trailing sync;':
+ *
+ * Load Relaxed : ldr
+ * Load Acquire : ldr; dmb ish
+ * Load Seq_Cst : ldr; dmb ish
+ *
+ * Store Relaxed : str
+ * Store Release : dmb ish; str
+ * Store Seq_Cst : dmb ish; str; dmb ish
+ *
+ * Relaxed Fence :
+ * Acquire Fence : dmb ish
+ * Release Fence : dmb ish
+ * Acq_Rel Fence : dmb ish
+ * Seq_Cst Fence : dmb ish
+ */
+
+/**
+ * ARMv7 Mapping 'leading sync;':
+ *
+ * Load Relaxed : ldr
+ * Load Acquire : ldr; dmb ish
+ * Load Seq_Cst : dmb ish; ldr; dmb ish
+ *
+ * Store Relaxed : str
+ * Store Release : dmb ish; str
+ * Store Seq_Cst : dmb ish; str
+ *
+ * Relaxed Fence :
+ * Acquire Fence : dmb ish
+ * Release Fence : dmb ish
+ * Acq_Rel Fence : dmb ish
+ * Seq_Cst Fence : dmb ish
+ */
+
+/**
+ * NOTE:
+ *
+ * On ARM32/64, we use the 'trailing sync;' convention with the stricter load acquire that uses
+ * a dmb instead of a control dependency + isb, to ensure (as one reason) that the IRIW litmus
+ * test is satisfied. See EASTL/atomic.h for further explanation and a deep-dive.
+ *
+ * For ARMv8 we could move to use the new proper store release and load acquire, RCsc variant.
+ * All ARMv7 approaches work on ARMv8, and this code path is only used with MSVC, which isn't used
+ * heavily. Most of the ARM code will end up going through clang or gcc since Microsoft ARM devices
+ * aren't that abundant.
+ */
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(EA_COMPILER_MSVC)
+
+	#if EA_PLATFORM_PTR_SIZE == 8
+		#define EASTL_ARCH_ATOMIC_HAS_128BIT
+	#endif
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "arch_arm_load.h"
+#include "arch_arm_store.h"
+
+#include "arch_arm_memory_barrier.h"
+
+#include "arch_arm_thread_fence.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_H */
diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h b/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h
new file mode 100644
index 00000000..a6d0e421
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h
@@ -0,0 +1,164 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
+#if defined(EA_COMPILER_MSVC)
+
+
+	/**
+	 * NOTE:
+	 *
+	 * Even 8-byte aligned 64-bit memory accesses on ARM32 are not
+	 * guaranteed to be atomic on all ARM32 CPUs. They are only guaranteed on
+	 * CPUs with the LPAE extension. We need to use a
+	 * ldrexd instruction in order to ensure no shearing is observed
+	 * for all ARM32 processors.
+	 */
+	#if defined(EA_PROCESSOR_ARM32)
+
+		#define EASTL_ARCH_ATOMIC_MSVC_ARM32_LDREXD(ret, ptr) \
+			ret = __ldrexd(ptr)
+
+	#endif
+
+
+	#define EASTL_ARCH_ATOMIC_ARM_LOAD_N(integralType, bits, type, ret, ptr) \
+		{ \
+			integralType retIntegral; \
+			retIntegral = EA_PREPROCESSOR_JOIN(__iso_volatile_load, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr))); \
+			\
+			ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+		}
+
+
+	#define EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr) \
+		EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int8, 8, type, ret, ptr)
+
+	#define EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr) \
+		EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int16, 16, type, ret, ptr)
+
+	#define EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr) \
+		EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int32, 32, type, ret, ptr)
+
+
+	#if defined(EA_PROCESSOR_ARM32)
+
+
+		#define EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr) \
+			{ \
+				__int64 loadRet64; \
+				EASTL_ARCH_ATOMIC_MSVC_ARM32_LDREXD(loadRet64, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__int64, (ptr))); \
+				\
+				ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, loadRet64); \
+			}
+
+	#else
+
+		#define EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr) \
+			EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int64, 64, type, ret, ptr)
+
+	#endif
+
+
+	/**
+	 * NOTE:
+	 * The ARM documentation states the following:
+	 * A 64-bit pair requires the address to be quadword aligned and is single-copy atomic for each doubleword at doubleword granularity
+	 *
+	 * Thus we must ensure the store succeeds in order for the load to be observed as atomic.
+	 * Hence we must use the full cmpxchg in order to do a proper atomic load.
+ */ + #define EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, MemoryOrder) \ + { \ + struct BitfieldPun128 \ + { \ + __int64 value[2]; \ + }; \ + \ + struct BitfieldPun128 loadedPun = EASTL_ATOMIC_TYPE_PUN_CAST(struct BitfieldPun128, *(ptr)); \ + \ + do \ + { \ + bool cmpxchgRetBool; \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(struct BitfieldPun128, cmpxchgRetBool, \ + EASTL_ATOMIC_TYPE_CAST(struct BitfieldPun128, (ptr)), \ + &loadedPun, loadedPun); \ + } while (!cmpxchgRetBool); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, loadedPun); \ + } + + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, RELAXED) + + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, ACQUIRE) + + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, SEQ_CST) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_LOAD_H */ diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h b/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h new file mode 100644 index 00000000..00af29ed --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h @@ -0,0 +1,96 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
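The LOAD_128 macro above illustrates a general technique: when no single-copy-atomic load exists at a given width, a load can be synthesized from a compare-exchange that stores back whatever value it observed. A rough portable C++ sketch of the same idea follows; the Value128 type and the function name are made up for illustration, and std::atomic is not guaranteed to be lock-free for 16-byte types on every toolchain.

#include <atomic>
#include <cstdint>

// A 16-byte trivially copyable value, analogous to the BitfieldPun128 struct above.
struct alignas(16) Value128 { std::uint64_t lo; std::uint64_t hi; };

// Atomic load synthesized from a compare-exchange: on failure 'snapshot' is
// overwritten with the current contents; on success we stored back the value
// that was already there, so the object is unchanged either way.
inline Value128 LoadViaCmpxchg(std::atomic<Value128>& obj)
{
	Value128 snapshot{0, 0};
	obj.compare_exchange_strong(snapshot, snapshot);
	return snapshot;
}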
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_MEMORY_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_MEMORY_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+#if defined(EA_COMPILER_MSVC)
+
+	#if defined(EA_PROCESSOR_ARM32)
+
+		#define EASTL_ARM_DMB_ISH _ARM_BARRIER_ISH
+
+		#define EASTL_ARM_DMB_ISHST _ARM_BARRIER_ISHST
+
+		#define EASTL_ARM_DMB_ISHLD _ARM_BARRIER_ISH
+
+	#elif defined(EA_PROCESSOR_ARM64)
+
+		#define EASTL_ARM_DMB_ISH _ARM64_BARRIER_ISH
+
+		#define EASTL_ARM_DMB_ISHST _ARM64_BARRIER_ISHST
+
+		#define EASTL_ARM_DMB_ISHLD _ARM64_BARRIER_ISHLD
+
+	#endif
+
+
+	/**
+	 * NOTE:
+	 * While it makes no sense for a hardware memory barrier not to imply a compiler barrier,
+	 * the MSVC docs do not explicitly state that, so it is better to be safe than sorry chasing down
+	 * hard-to-find bugs due to the compiler deciding to reorder things.
+	 */
+
+	#define EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(option) \
+		EASTL_ATOMIC_COMPILER_BARRIER(); \
+		__dmb(option); \
+		EASTL_ATOMIC_COMPILER_BARRIER()
+
+
+#elif defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)
+
+	#define EASTL_ARM_DMB_ISH ish
+
+	#define EASTL_ARM_DMB_ISHST ishst
+
+	#if defined(EA_PROCESSOR_ARM32)
+
+		#define EASTL_ARM_DMB_ISHLD ish
+
+	#elif defined(EA_PROCESSOR_ARM64)
+
+		#define EASTL_ARM_DMB_ISHLD ishld
+
+	#endif
+
+
+	#define EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(option) \
+		__asm__ __volatile__ ("dmb " EA_STRINGIFY(option) ::: "memory")
+
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_MB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_MB() \
+	EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(EASTL_ARM_DMB_ISH)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_WMB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_WMB() \
+	EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(EASTL_ARM_DMB_ISHST)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_RMB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_RMB() \
+	EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(EASTL_ARM_DMB_ISHLD)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_MEMORY_BARRIER_H */
diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm_store.h b/include/EASTL/internal/atomic/arch/arm/arch_arm_store.h
new file mode 100644
index 00000000..ab53b9d4
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arm/arch_arm_store.h
@@ -0,0 +1,142 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_STORE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_STORE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_STORE_*_N(type, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) + + + #define EASTL_ARCH_ATOMIC_ARM_STORE_N(integralType, bits, type, ptr, val) \ + EA_PREPROCESSOR_JOIN(__iso_volatile_store, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val))) + + + #define EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_N(__int8, 8, type, ptr, val) + + #define EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_N(__int16, 16, type, ptr, val) + + #define EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_N(__int32, 32, type, ptr, val) + + + #if defined(EA_PROCESSOR_ARM64) + + #define EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_N(__int64, 64, type, ptr, val) + + #endif + + + #define EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, MemoryOrder) \ + { \ + type exchange128; EA_UNUSED(exchange128); \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \ + } + + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, RELAXED) + + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, RELEASE) + + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val) ; \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val); \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, SEQ_CST) + + + #if defined(EA_PROCESSOR_ARM32) + + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64(type, ptr, val) \ + { \ + type retExchange64; EA_UNUSED(retExchange64); \ + EASTL_ATOMIC_EXCHANGE_RELAXED_64(type, retExchange64, ptr, val); \ + } + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64(type, ptr, val) \ + { \ + type retExchange64; EA_UNUSED(retExchange64); \ + EASTL_ATOMIC_EXCHANGE_RELEASE_64(type, retExchange64, ptr, val); \ + } + + #define 
EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \ + { \ + type retExchange64; EA_UNUSED(retExchange64); \ + EASTL_ATOMIC_EXCHANGE_SEQ_CST_64(type, retExchange64, ptr, val); \ + } + + + #elif defined(EA_PROCESSOR_ARM64) + + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64(type, ptr, val) \ + EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \ + EASTL_ATOMIC_CPU_MB(); \ + EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val); \ + EASTL_ATOMIC_CPU_MB() + + + #endif + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_STORE_H */ diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h b/include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h new file mode 100644 index 00000000..391c64e0 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h @@ -0,0 +1,37 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_THREAD_FENCE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_THREAD_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_THREAD_FENCE_*() +// +#if defined(EA_COMPILER_MSVC) + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE() \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE() \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL() \ + EASTL_ATOMIC_CPU_MB() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST() \ + EASTL_ATOMIC_CPU_MB() + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_THREAD_FENCE_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86.h b/include/EASTL/internal/atomic/arch/x86/arch_x86.h new file mode 100644 index 00000000..2c782cbc --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86.h @@ -0,0 +1,159 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+/**
+ * x86 && x64 Mappings
+ *
+ * Load Relaxed : MOV
+ * Load Acquire : MOV; COMPILER_BARRIER;
+ * Load Seq_Cst : MOV; COMPILER_BARRIER;
+ *
+ * Store Relaxed : MOV
+ * Store Release : COMPILER_BARRIER; MOV;
+ * Store Seq_Cst : LOCK XCHG : MOV; MFENCE;
+ *
+ * Relaxed Fence :
+ * Acquire Fence : COMPILER_BARRIER
+ * Release Fence : COMPILER_BARRIER
+ * Acq_Rel Fence : COMPILER_BARRIER
+ * Seq_Cst Fence : MFENCE
+ */
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(EA_COMPILER_MSVC)
+
+	#if EA_PLATFORM_PTR_SIZE == 8
+		#define EASTL_ARCH_ATOMIC_HAS_128BIT
+	#endif
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+	#define EASTL_ARCH_ATOMIC_HAS_128BIT
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/**
+ * NOTE:
+ * On 32-bit x86, Intel Pentium and newer, AMD K5 and newer,
+ * and any i686-class CPU support only the 64-bit cmpxchg
+ * known as cmpxchg8b.
+ * On this class of CPUs we can guarantee that 64-bit loads are
+ * also atomic by using the SSE1/SSE2 movq instructions.
+ * We support all other atomic operations
+ * on compilers that only provide this 64-bit cmpxchg instruction
+ * by wrapping them around the 64-bit cmpxchg8b instruction.
+ */
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+	#define EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
+		static_assert(false, "EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED() must be implemented!");
+
+	#define EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET(ret, prevObserved, val)
+
+
+	#define EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
+		{ \
+			bool cmpxchgRet; \
+			EASTL_ATOMIC_LOAD_RELAXED_64(type, ret, ptr); \
+			do \
+			{ \
+				type computedDesired; \
+				PRE_COMPUTE_DESIRED(computedDesired, ret, (val)); \
+				EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _64)(type, cmpxchgRet, ptr, &(ret), computedDesired); \
+			} while (!cmpxchgRet); \
+			POST_COMPUTE_RET(ret, ret, (val)); \
+		}
+
+
+#endif
+
+
+/**
+ * NOTE:
+ * 64-bit x64 CPUs support only the 128-bit cmpxchg known as cmpxchg16b.
+ * We support all other atomic operations by wrapping them around
+ * the 128-bit cmpxchg16b instruction.
+ * 128-bit loads are only atomic when using cmpxchg16b on x64.
+ */
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+	#define EASTL_ARCH_ATOMIC_X64_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
+		static_assert(false, "EASTL_ARCH_ATOMIC_X64_NOP_PRE_COMPUTE_DESIRED() must be implemented!");
+
+	#define EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET(ret, prevObserved, val)
+
+
+	#define EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
+		{ \
+			bool cmpxchgRet; \
+			/* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \
+			/* Either we do not observe *(ptr) but then the cmpxchg will fail and the observed */ \
+			/* atomic load will be returned.
Or the non-atomic load got lucky and the cmpxchg succeeds */ \ + /* because the observed value equals the value in *(ptr) thus we optimistically do a non-atomic load. */ \ + ret = *(ptr); \ + do \ + { \ + type computedDesired; \ + PRE_COMPUTE_DESIRED(computedDesired, ret, (val)); \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), computedDesired); \ + } while (!cmpxchgRet); \ + POST_COMPUTE_RET(ret, ret, (val)); \ + } + + +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +#include "arch_x86_fetch_add.h" +#include "arch_x86_fetch_sub.h" + +#include "arch_x86_fetch_and.h" +#include "arch_x86_fetch_xor.h" +#include "arch_x86_fetch_or.h" + +#include "arch_x86_add_fetch.h" +#include "arch_x86_sub_fetch.h" + +#include "arch_x86_and_fetch.h" +#include "arch_x86_xor_fetch.h" +#include "arch_x86_or_fetch.h" + +#include "arch_x86_exchange.h" + +#include "arch_x86_cmpxchg_weak.h" +#include "arch_x86_cmpxchg_strong.h" + +#include "arch_x86_memory_barrier.h" + +#include "arch_x86_thread_fence.h" + +#include "arch_x86_load.h" +#include "arch_x86_store.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h new file mode 100644 index 00000000..064f2c01 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h @@ -0,0 +1,96 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_ADD_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_ADD_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) + (val)) + + #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) + (val)) + + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + 
EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) + (val)) + + #define EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) + (val)) + + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X64_ADD_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_ADD_FETCH_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_ADD_FETCH_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h new file mode 100644 index 00000000..8c3c9327 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h @@ -0,0 +1,96 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
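add_fetch above, and the fetch/and/or/xor/sub variants in the headers that follow, all funnel through the *_OP_64_IMPL / *_OP_128_IMPL compare-exchange loop with a small PRE_COMPUTE_DESIRED / POST_COMPUTE_RET plug-in. Written as ordinary C++ rather than macros, the shape of that loop is roughly the sketch below (a hypothetical helper for an integral T, not the macros themselves):

#include <atomic>

// Generic add_fetch built from a compare-exchange loop, mirroring the macro
// structure: observe, compute the desired value, try to publish, retry on contention.
template <typename T>
T AddFetchViaCmpxchg(std::atomic<T>& obj, T val)
{
	T observed = obj.load(std::memory_order_relaxed);
	T desired;
	do
	{
		desired = static_cast<T>(observed + val);    // PRE_COMPUTE_DESIRED
	} while (!obj.compare_exchange_weak(observed, desired));
	return desired;                                  // POST_COMPUTE_RET: the new value
}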
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_AND_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_AND_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) & (val)) + + #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) & (val)) + + + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) & (val)) + + #define EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) & (val)) + + + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X64_AND_FETCH_PRE_COMPUTE_DESIRED, \ + 
EASTL_ARCH_ATOMIC_X64_AND_FETCH_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_AND_FETCH_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h new file mode 100644 index 00000000..8127ccef --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h @@ -0,0 +1,69 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_STRONG_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_STRONG_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) \ + { \ + /* Compare RDX:RAX with m128. If equal, set ZF and load RCX:RBX into m128. Else, clear ZF and load m128 into RDX:RAX. */ \ + __asm__ __volatile__ ("lock; cmpxchg16b %2\n" /* cmpxchg16b sets/clears ZF */ \ + "sete %3" /* If ZF == 1, set the return value to 1 */ \ + /* Output Operands */ \ + : "=a"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))), "=d"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)) + 1)), \ + "+m"(*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__uint128_t, (ptr)))), \ + "=rm"((ret)) \ + /* Input Operands */ \ + : "b"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))), "c"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)) + 1)), \ + "a"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))), "d"(*(EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)) + 1)) \ + /* Clobbers */ \ + : "memory", "cc"); \ + } + + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + #define 
EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_ARCH_ATOMIC_X64_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_STRONG_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h new file mode 100644 index 00000000..f8b956a3 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h @@ -0,0 +1,52 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_WEAK_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_WEAK_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) + + #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_WEAK_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h new file mode 100644 index 00000000..85117a87 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h @@ -0,0 +1,91 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = (val) + + + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_EXCHANGE_128(type, ret, ptr, val, MemoryOrder) \ + { \ + bool cmpxchgRet; \ + /* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \ + /* Either we do not observe *(ptr) but then the cmpxchg will fail and the observed */ \ + /* atomic load will be returned. Or the non-atomic load got lucky and the cmpxchg succeeds */ \ + /* because the observed value equals the value in *(ptr) thus we optimistically do a non-atomic load. 
*/ \ + ret = *(ptr); \ + do \ + { \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), val); \ + } while (!cmpxchgRet); \ + } + + + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_EXCHANGE_128(type, ret, ptr, val, RELAXED) + + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_EXCHANGE_128(type, ret, ptr, val, ACQUIRE) + + #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_EXCHANGE_128(type, ret, ptr, val, RELEASE) + + #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_EXCHANGE_128(type, ret, ptr, val, ACQ_REL) + + #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_EXCHANGE_128(type, ret, ptr, val, SEQ_CST) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h new file mode 100644 index 00000000..e78c2697 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h @@ -0,0 +1,90 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_ADD_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_ADD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) + (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_FETCH_ADD_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) + (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128(type, ret, ptr, val) \ + 
EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X64_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X64_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X64_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X64_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X64_FETCH_ADD_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_ADD_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h new file mode 100644 index 00000000..6b81b5c0 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h @@ -0,0 +1,90 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_AND_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_AND_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) & (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) 
&& defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_FETCH_AND_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) & (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X64_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X64_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X64_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X64_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X64_FETCH_AND_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_AND_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h new file mode 100644 index 00000000..aa5bd710 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h @@ -0,0 +1,90 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_OR_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_OR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) | (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_FETCH_OR_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) | (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X64_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X64_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X64_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X64_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X64_FETCH_OR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_OR_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h new file mode 100644 index 00000000..995011d9 --- /dev/null +++ 
b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h @@ -0,0 +1,90 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_SUB_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_SUB_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) - (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_FETCH_SUB_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) - (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X64_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X64_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X64_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X64_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X64_FETCH_SUB_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + +#endif + + +#endif /* 
EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_SUB_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h new file mode 100644 index 00000000..d9126281 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h @@ -0,0 +1,90 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_XOR_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_XOR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) ^ (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_FETCH_XOR_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) ^ (val)) + + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X64_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X64_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X64_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X64_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + #define 
EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X64_FETCH_XOR_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_NOP_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_XOR_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h new file mode 100644 index 00000000..444897de --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h @@ -0,0 +1,159 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_LOAD_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_LOAD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_LOAD_*_N(type, type ret, type * ptr) +// +#if defined(EA_COMPILER_MSVC) + + + #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1920) // >= VS2019 + + #define EASTL_ARCH_ATOMIC_X86_LOAD_N(integralType, bits, type, ret, ptr) \ + { \ + integralType retIntegral; \ + retIntegral = EA_PREPROCESSOR_JOIN(__iso_volatile_load, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr))); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + } + + #else + + #define EASTL_ARCH_ATOMIC_X86_LOAD_N(integralType, bits, type, ret, ptr) \ + { \ + integralType retIntegral; \ + retIntegral = *(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr))); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + } + + #endif + + + #define EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, MemoryOrder) \ + { \ + struct BitfieldPun128 \ + { \ + __int64 value[2]; \ + }; \ + \ + struct BitfieldPun128 expectedPun{0, 0}; \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, expectedPun); \ + \ + bool cmpxchgRetBool; EA_UNUSED(cmpxchgRetBool); \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRetBool, ptr, &(ret), ret); \ + } + + + #define EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_N(__int8, 8, type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_N(__int16, 16, type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_N(__int32, 32, type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_N(__int64, 64, type, ret, ptr) + + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr) + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, RELAXED) + + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \ + 
EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, ACQUIRE); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, SEQ_CST); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + +#endif + + +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, MemoryOrder) \ + { \ + __uint128_t expected = 0; \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, expected); \ + \ + bool cmpxchgRetBool; EA_UNUSED(cmpxchgRetBool); \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRetBool, ptr, &(ret), ret); \ + } + + + #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, RELAXED) + + #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, ACQUIRE) + + #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \ + EASTL_ARCH_ATOMIC_X64_LOAD_128(type, ret, ptr, SEQ_CST) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_LOAD_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h new file mode 100644 index 00000000..78eba24d --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h @@ -0,0 +1,102 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_MEMORY_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_MEMORY_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_MB() +// +#if defined(EA_COMPILER_MSVC) + + /** + * NOTE: + * While it makes no sense for a hardware memory barrier to not imply a compiler barrier. + * MSVC docs do not explicitly state that, so better to be safe than sorry chasing down + * hard to find bugs due to the compiler deciding to reorder things. 
+ */ + + #if 1 + + #define EASTL_ARCH_ATOMIC_CPU_MB() \ + { \ + EA_DISABLE_VC_WARNING(4456); \ + volatile long _; \ + _InterlockedExchangeAdd(&_, 0); \ + EA_RESTORE_VC_WARNING(); \ + } + + #else + + #define EASTL_ARCH_ATOMIC_CPU_MB() \ + EASTL_ATOMIC_COMPILER_BARRIER(); \ + _mm_mfence(); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #endif + +#elif defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG) + + /** + * NOTE: + * + * mfence orders all loads/stores to/from all memory types. + * We only care about ordinary cacheable memory so lighter weight locked instruction + * is far faster than a mfence to get a full memory barrier. + * lock; addl against the top of the stack is good because: + * distinct for every thread so prevents false sharing + * that cacheline is most likely cache hot + * + * We intentionally do it below the stack pointer to avoid false RAW register dependencies, + * in cases where the compiler reads from the stack pointer after the lock; addl instruction + * + * Accounting for Red Zones or Cachelines doesn't provide extra benefit. + */ + + #if defined(EA_PROCESSOR_X86) + + #define EASTL_ARCH_ATOMIC_CPU_MB() \ + __asm__ __volatile__ ("lock; addl $0, -4(%%esp)" ::: "memory", "cc") + + #elif defined(EA_PROCESSOR_X86_64) + + #define EASTL_ARCH_ATOMIC_CPU_MB() \ + __asm__ __volatile__ ("lock; addl $0, -8(%%rsp)" ::: "memory", "cc") + + #else + + #define EASTL_ARCH_ATOMIC_CPU_MB() \ + __asm__ __volatile__ ("mfence" ::: "memory") + + #endif + + +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_WMB() +// +#define EASTL_ARCH_ATOMIC_CPU_WMB() \ + EASTL_ATOMIC_COMPILER_BARRIER() + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_CPU_RMB() +// +#define EASTL_ARCH_ATOMIC_CPU_RMB() \ + EASTL_ATOMIC_COMPILER_BARRIER() + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_MEMORY_BARRIER_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h new file mode 100644 index 00000000..2007c66a --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h @@ -0,0 +1,96 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_OR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_OR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) | (val)) + + #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) | (val)) + + + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_OR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) | (val)) + + #define EASTL_ARCH_ATOMIC_X64_OR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) | (val)) + + + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X64_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X64_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X64_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X64_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_OR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X64_OR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_OR_FETCH_POST_COMPUTE_RET) + 
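Note: the EASTL_ARCH_ATOMIC_X86_OP_64_IMPL and EASTL_ARCH_ATOMIC_X64_OP_128_IMPL helpers used throughout these headers are defined elsewhere in the arch layer and are not part of this hunk. As a rough, hypothetical sketch of the pattern that the PRE_COMPUTE_DESIRED and POST_COMPUTE_RET hooks plug into (illustrated with std::atomic and an or_fetch flavour; not EASTL's actual implementation), the expansion behaves like a compare-exchange loop:

    #include <atomic>

    // Hypothetical illustration only; the real work is done by the
    // EASTL_ARCH_ATOMIC_*_OP_*_IMPL macros, which are not shown in this hunk.
    template <typename T>
    T or_fetch_sketch(std::atomic<T>& obj, T val)
    {
        T observed = obj.load(std::memory_order_relaxed);
        T desired  = observed | val;                      // PRE_COMPUTE_DESIRED
        while (!obj.compare_exchange_weak(observed, desired))
        {
            desired = observed | val;                     // recompute against the newly observed value
        }
        return observed | val;                            // POST_COMPUTE_RET: *_fetch returns the new value
        // A fetch_* flavour returns 'observed' (the previous value) instead,
        // which is why those macros pass a NOP post-compute hook.
    }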
+ +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_OR_FETCH_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h new file mode 100644 index 00000000..39a0c047 --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h @@ -0,0 +1,171 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_STORE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_STORE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_STORE_*_N(type, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) + + + #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1920) // >= VS2019 + + #define EASTL_ARCH_ATOMIC_X86_STORE_N(integralType, bits, type, ptr, val) \ + EA_PREPROCESSOR_JOIN(__iso_volatile_store, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val))) + + #else + + #define EASTL_ARCH_ATOMIC_X86_STORE_N(integralType, bits, type, ptr, val) \ + { \ + integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \ + \ + *(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr))) = valIntegral; \ + } + + #endif + + + #define EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, MemoryOrder) \ + { \ + type exchange128; EA_UNUSED(exchange128); \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \ + } + + + #define EASTL_ARCH_ATOMIC_X86_STORE_8(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_N(__int8, 8, type, ptr, val) + + #define EASTL_ARCH_ATOMIC_X86_STORE_16(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_N(__int16, 16, type, ptr, val) + + #define EASTL_ARCH_ATOMIC_X86_STORE_32(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_N(__int32, 32, type, ptr, val) + + #define EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_N(__int64, 64, type, ptr, val) + + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_8(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_16(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_32(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, RELAXED) + + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8(type, ptr, val) \ + EASTL_ATOMIC_COMPILER_BARRIER(); \ + EASTL_ARCH_ATOMIC_X86_STORE_8(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16(type, ptr, val) \ + EASTL_ATOMIC_COMPILER_BARRIER(); \ + EASTL_ARCH_ATOMIC_X86_STORE_16(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32(type, ptr, val) \ + EASTL_ATOMIC_COMPILER_BARRIER(); \ + EASTL_ARCH_ATOMIC_X86_STORE_32(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64(type, ptr, val) \ + EASTL_ATOMIC_COMPILER_BARRIER(); \ + EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, 
RELEASE) + + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \ + { \ + type exchange8; EA_UNUSED(exchange8); \ + EASTL_ATOMIC_EXCHANGE_SEQ_CST_8(type, exchange8, ptr, val); \ + } + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \ + { \ + type exchange16; EA_UNUSED(exchange16); \ + EASTL_ATOMIC_EXCHANGE_SEQ_CST_16(type, exchange16, ptr, val); \ + } + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \ + { \ + type exchange32; EA_UNUSED(exchange32); \ + EASTL_ATOMIC_EXCHANGE_SEQ_CST_32(type, exchange32, ptr, val); \ + } + + + /** + * NOTE: + * + * Since 64-bit exchange is wrapped around a cmpxchg8b on 32-bit, it is + * faster to just do a mov; mfence. + */ + #if defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \ + EASTL_ATOMIC_COMPILER_BARRIER(); \ + EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val); \ + EASTL_ATOMIC_CPU_MB() + + + #elif defined(EA_PROCESSOR_X86_64) + + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \ + { \ + type exchange64; EA_UNUSED(exchange64); \ + EASTL_ATOMIC_EXCHANGE_SEQ_CST_64(type, exchange64, ptr, val); \ + } + + + #endif + + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, SEQ_CST) + + +#endif + + +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, MemoryOrder) \ + { \ + type exchange128; EA_UNUSED(exchange128); \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \ + } + + + #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, RELAXED) + + #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, RELEASE) + + #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_STORE_128(type, ptr, val, SEQ_CST) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_STORE_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h new file mode 100644 index 00000000..c300816e --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h @@ -0,0 +1,96 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_SUB_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_SUB_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) - (val)) + + #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) - (val)) + + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_SUB_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) - (val)) + + #define EASTL_ARCH_ATOMIC_X64_SUB_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) - (val)) + + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X64_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X64_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X64_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X64_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_SUB_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X64_SUB_FETCH_PRE_COMPUTE_DESIRED, \ + 
EASTL_ARCH_ATOMIC_X64_SUB_FETCH_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_SUB_FETCH_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h new file mode 100644 index 00000000..fe3bd58c --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h @@ -0,0 +1,42 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_THREAD_FENCE_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_THREAD_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_THREAD_FENCE_*() +// +#if defined(EA_COMPILER_MSVC) + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE() \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE() \ + EASTL_ATOMIC_COMPILER_BARRIER() + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL() \ + EASTL_ATOMIC_COMPILER_BARRIER() + +#endif + + +#if defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC) + + #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST() \ + EASTL_ATOMIC_CPU_MB() + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_THREAD_FENCE_H */ diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h new file mode 100644 index 00000000..37ac843f --- /dev/null +++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h @@ -0,0 +1,96 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_XOR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_ARCH_X86_XOR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ARCH_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86) + + + #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) ^ (val)) + + #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) ^ (val)) + + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET) + + +#endif + + +#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)) + + + #define EASTL_ARCH_ATOMIC_X64_XOR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \ + ret = ((observed) ^ (val)) + + #define EASTL_ARCH_ATOMIC_X64_XOR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \ + ret = ((prevObserved) ^ (val)) + + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELAXED, \ + EASTL_ARCH_ATOMIC_X64_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \ + EASTL_ARCH_ATOMIC_X64_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, RELEASE, \ + EASTL_ARCH_ATOMIC_X64_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \ + EASTL_ARCH_ATOMIC_X64_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + EASTL_ARCH_ATOMIC_X64_XOR_FETCH_POST_COMPUTE_RET) + + #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ARCH_ATOMIC_X64_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \ + EASTL_ARCH_ATOMIC_X64_XOR_FETCH_PRE_COMPUTE_DESIRED, \ + 
EASTL_ARCH_ATOMIC_X64_XOR_FETCH_POST_COMPUTE_RET) + + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_XOR_FETCH_H */ diff --git a/include/EASTL/internal/atomic/atomic.h b/include/EASTL/internal/atomic/atomic.h new file mode 100644 index 00000000..7684d6df --- /dev/null +++ b/include/EASTL/internal/atomic/atomic.h @@ -0,0 +1,250 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_H +#define EASTL_ATOMIC_INTERNAL_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#include +#include +#include + +#include "atomic_macros.h" +#include "atomic_casts.h" + +#include "atomic_memory_order.h" +#include "atomic_asserts.h" + +#include "atomic_size_aligned.h" +#include "atomic_base_width.h" + +#include "atomic_integral.h" + +#include "atomic_pointer.h" + + +///////////////////////////////////////////////////////////////////////////////// + + +/** + * NOTE: + * All of the actual implemention is done via the ATOMIC_MACROS in the compiler or arch sub folders. + * The C++ code is merely boilerplate around these macros that actually implement the atomic operations. + * The C++ boilerplate is also hidden behind macros. + * This may seem more complicated but this is all meant to reduce copy-pasting and to ensure all operations + * all end up going down to one macro that does the actual implementation. + * The reduced code duplication makes it easier to verify the implementation and reason about it. + * Ensures we do not have to re-implement the same code for compilers that do not support generic builtins such as MSVC. + * Ensures if we ever have to implement a new platform, only the low-level leaf macros have to be implemented, everything else will be generated for you. 
+ */ + + +#include "atomic_push_compiler_options.h" + + +namespace eastl +{ + + +namespace internal +{ + + + template + struct is_atomic_lockfree_size + { + static EASTL_CPP17_INLINE_VARIABLE constexpr bool value = false || + #if defined(EASTL_ATOMIC_HAS_8BIT) + sizeof(T) == 1 || + #endif + #if defined(EASTL_ATOMIC_HAS_16BIT) + sizeof(T) == 2 || + #endif + #if defined(EASTL_ATOMIC_HAS_32BIT) + sizeof(T) == 4 || + #endif + #if defined(EASTL_ATOMIC_HAS_64BIT) + sizeof(T) == 8 || + #endif + #if defined(EASTL_ATOMIC_HAS_128BIT) + sizeof(T) == 16 || + #endif + false; + }; + + + template + struct is_user_type_constrained + { + static EASTL_CPP17_INLINE_VARIABLE constexpr bool value = + eastl::is_trivially_copyable::value && eastl::is_copy_constructible::value && eastl::is_move_constructible::value && + eastl::is_copy_assignable::value && eastl::is_move_assignable::value; + }; + + + template + struct is_user_type_suitable_for_primary_template + { + static EASTL_CPP17_INLINE_VARIABLE constexpr bool value = eastl::internal::is_atomic_lockfree_size::value && eastl::internal::is_user_type_constrained::value; + }; + + + template + using select_atomic_inherit_0 = typename eastl::conditional || eastl::internal::is_user_type_suitable_for_primary_template::value, + eastl::internal::atomic_base_width, /* True */ + eastl::internal::atomic_invalid_type /* False */ + >::type; + + template + using select_atomic_inherit = select_atomic_inherit_0; + + +} // namespace internal + + +#define EASTL_ATOMIC_CLASS_IMPL(type, base, valueType, differenceType) \ + private: \ + \ + EASTL_ATOMIC_STATIC_ASSERT_TYPE(type); \ + \ + using Base = base; \ + \ + public: \ + \ + typedef valueType value_type; \ + typedef differenceType difference_type; \ + \ + public: \ + \ + static EASTL_CPP17_INLINE_VARIABLE constexpr bool is_always_lock_free = eastl::internal::is_atomic_lockfree_size::value; \ + \ + public: /* deleted ctors && assignment operators */ \ + \ + atomic(const atomic&) EA_NOEXCEPT = delete; \ + \ + atomic& operator =(const atomic&) EA_NOEXCEPT = delete; \ + atomic& operator =(const atomic&) volatile EA_NOEXCEPT = delete; \ + \ + public: /* ctors */ \ + \ + atomic(type desired) EA_NOEXCEPT \ + : Base{ desired } \ + { \ + } \ + \ + atomic() EA_NOEXCEPT = default; \ + \ + public: \ + \ + bool is_lock_free() const EA_NOEXCEPT \ + { \ + return eastl::internal::is_atomic_lockfree_size::value; \ + } \ + \ + bool is_lock_free() const volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(type); \ + return false; \ + } + + + +#define EASTL_ATOMIC_USING_ATOMIC_BASE(type) \ + public: \ + using Base::operator=; \ + using Base::store; \ + using Base::load; \ + using Base::exchange; \ + using Base::compare_exchange_weak; \ + using Base::compare_exchange_strong; \ + \ + public: \ + \ + operator type() const volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + } \ + \ + operator type() const EA_NOEXCEPT \ + { \ + return load(eastl::memory_order_seq_cst); \ + } + + +#define EASTL_ATOMIC_USING_ATOMIC_INTEGRAL() \ + public: \ + using Base::fetch_add; \ + using Base::add_fetch; \ + using Base::fetch_sub; \ + using Base::sub_fetch; \ + using Base::fetch_and; \ + using Base::and_fetch; \ + using Base::fetch_or; \ + using Base::or_fetch; \ + using Base::fetch_xor; \ + using Base::xor_fetch; \ + using Base::operator++; \ + using Base::operator--; \ + using Base::operator+=; \ + using Base::operator-=; \ + using Base::operator&=; \ + using Base::operator|=; \ + using Base::operator^=; + + 
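+/**
+ * Example (an illustrative sketch; 'counter' and 'v' are hypothetical names, and this
+ * assumes a platform where EASTL_ATOMIC_HAS_64BIT is defined): the boilerplate macros
+ * above, together with the specializations below, give the following user-facing surface.
+ *
+ *     eastl::atomic<uint64_t> counter{ 0 };                   // integral specialization defined below
+ *     counter.fetch_add(1, eastl::memory_order_relaxed);      // via EASTL_ATOMIC_USING_ATOMIC_INTEGRAL()
+ *     uint64_t v = counter.load(eastl::memory_order_acquire); // via EASTL_ATOMIC_USING_ATOMIC_BASE()
+ *
+ *     static_assert(eastl::atomic<uint64_t>::is_always_lock_free, "expected to be lock-free");
+ */
+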
+#define EASTL_ATOMIC_USING_ATOMIC_POINTER() \ + public: \ + using Base::fetch_add; \ + using Base::add_fetch; \ + using Base::fetch_sub; \ + using Base::sub_fetch; \ + using Base::operator++; \ + using Base::operator--; \ + using Base::operator+=; \ + using Base::operator-=; + + +template +struct atomic : protected eastl::internal::select_atomic_inherit +{ + EASTL_ATOMIC_CLASS_IMPL(T, eastl::internal::select_atomic_inherit, T, T) + + EASTL_ATOMIC_USING_ATOMIC_BASE(T) +}; + + +template +struct atomic && !eastl::is_same_v>> : protected eastl::internal::atomic_integral_width +{ + EASTL_ATOMIC_CLASS_IMPL(T, eastl::internal::atomic_integral_width, T, T) + + EASTL_ATOMIC_USING_ATOMIC_BASE(T) + + EASTL_ATOMIC_USING_ATOMIC_INTEGRAL() +}; + + +template +struct atomic : protected eastl::internal::atomic_pointer_width +{ + EASTL_ATOMIC_CLASS_IMPL(T*, eastl::internal::atomic_pointer_width, T*, ptrdiff_t) + + EASTL_ATOMIC_USING_ATOMIC_BASE(T*) + + EASTL_ATOMIC_USING_ATOMIC_POINTER() +}; + + +} // namespace eastl + + +#include "atomic_pop_compiler_options.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_H */ diff --git a/include/EASTL/internal/atomic/atomic_asserts.h b/include/EASTL/internal/atomic/atomic_asserts.h new file mode 100644 index 00000000..c561ccb0 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_asserts.h @@ -0,0 +1,70 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_STATIC_ASSERTS_H +#define EASTL_ATOMIC_INTERNAL_STATIC_ASSERTS_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(type) \ + static_assert(!eastl::is_same::value, "eastl::atomic : volatile eastl::atomic is not what you expect! Read the docs in EASTL/atomic.h! Use the memory orders to access the atomic object!"); + +#define EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(type) \ + static_assert(!eastl::is_same::value, "eastl::atomic : invalid memory order for the given operation!"); + +#define EASTL_ATOMIC_STATIC_ASSERT_TYPE(type) \ + static_assert(!eastl::is_const::value, "eastl::atomic : Template Typename T cannot be const!"); \ + static_assert(!eastl::is_volatile::value, "eastl::atomic : Template Typename T cannot be volatile! 
Use the memory orders to access the underlying type for the guarantees you need."); \
+	static_assert(eastl::is_trivially_destructible::value, "eastl::atomic : Must be trivially destructible!"); \
+	static_assert(eastl::is_standard_layout::value, "eastl::atomic : Must have standard layout!"); \
+	static_assert(eastl::is_copy_constructible::value, "eastl::atomic : Template Typename T must be copy constructible!"); \
+	static_assert(eastl::is_move_constructible::value, "eastl::atomic : Template Typename T must be move constructible!"); \
+	static_assert(eastl::is_copy_assignable::value, "eastl::atomic : Template Typename T must be copy assignable!"); \
+	static_assert(eastl::is_move_assignable::value, "eastl::atomic : Template Typename T must be move assignable!"); \
+	static_assert(eastl::internal::is_atomic_lockfree_size::value, "eastl::atomic : Template Typename T must be a lockfree size!"); \
+	static_assert(eastl::is_trivially_copyable::value, "eastl::atomic : Template Typename T must be trivially copyable!"); \
+	static_assert(eastl::is_nothrow_default_constructible::value, "eastl::atomic : Template Typename T must be nothrow default constructible!");
+
+#define EASTL_ATOMIC_STATIC_ASSERT_TYPE_IS_OBJECT(type) \
+	static_assert(eastl::is_object::value, "eastl::atomic : Template Typename T must be an object type!");
+
+#define EASTL_ATOMIC_ASSERT_ALIGNED(alignment) \
+	EASTL_ASSERT((alignment & (alignment - 1)) == 0); \
+	EASTL_ASSERT((reinterpret_cast(this) & (alignment - 1)) == 0)
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+	template
+	struct atomic_invalid_type
+	{
+		/**
+		 * class Test { int i; int j; int k; }; sizeof(Test) == 96 bits
+		 *
+		 * std::atomic allows non-primitive types to be used for the template type.
+		 * This causes the API to degrade to locking for types that cannot fit into the lockfree size
+		 * of the target platform, such as std::atomic, leading to performance traps.
+		 */
+
+		static_assert(!eastl::is_same::value, "eastl::atomic invalid template type T!");
+	};
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_STATIC_ASSERTS_H */
diff --git a/include/EASTL/internal/atomic/atomic_base_width.h b/include/EASTL/internal/atomic/atomic_base_width.h
new file mode 100644
index 00000000..d88d6b85
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_base_width.h
@@ -0,0 +1,315 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_BASE_WIDTH_H +#define EASTL_ATOMIC_INTERNAL_BASE_WIDTH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#include "atomic_push_compiler_options.h" + + +namespace eastl +{ + + +namespace internal +{ + + + template + struct atomic_base_width; + + +#define EASTL_ATOMIC_STORE_FUNC_IMPL(op, bits) \ + EA_PREPROCESSOR_JOIN(op, bits)(T, this->GetAtomicAddress(), desired); + +#define EASTL_ATOMIC_LOAD_FUNC_IMPL(op, bits) \ + T retVal; \ + EA_PREPROCESSOR_JOIN(op, bits)(T, retVal, this->GetAtomicAddress()); \ + return retVal; + +#define EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(op, bits) \ + T retVal; \ + EA_PREPROCESSOR_JOIN(op, bits)(T, retVal, this->GetAtomicAddress(), desired); \ + return retVal; + +#define EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(op, bits) \ + bool retVal; \ + EA_PREPROCESSOR_JOIN(op, bits)(T, retVal, this->GetAtomicAddress(), &expected, desired); \ + return retVal; + +#define EASTL_ATOMIC_BASE_OP_JOIN(fetchOp, Order) \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, fetchOp), Order) + +#define EASTL_ATOMIC_BASE_CMPXCHG_FUNCS_IMPL(funcName, cmpxchgOp, bits) \ + using Base::funcName; \ + \ + bool funcName(T& expected, T desired) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELAXED_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQUIRE_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_release_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELEASE_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQ_REL_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_relaxed_s, \ + eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELAXED_RELAXED_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_acquire_s, \ + eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQUIRE_RELAXED_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_acquire_s, \ + eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQUIRE_ACQUIRE_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_release_s, \ + eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELEASE_RELAXED_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + 
eastl::internal::memory_order_acq_rel_s, \ + eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQ_REL_RELAXED_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_acq_rel_s, \ + eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQ_REL_ACQUIRE_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_seq_cst_s, \ + eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_RELAXED_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_seq_cst_s, \ + eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_ACQUIRE_), bits); \ + } \ + \ + bool funcName(T& expected, T desired, \ + eastl::internal::memory_order_seq_cst_s, \ + eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_SEQ_CST_), bits); \ + } + +#define EASTL_ATOMIC_BASE_CMPXCHG_WEAK_FUNCS_IMPL(bits) \ + EASTL_ATOMIC_BASE_CMPXCHG_FUNCS_IMPL(compare_exchange_weak, CMPXCHG_WEAK, bits) + +#define EASTL_ATOMIC_BASE_CMPXCHG_STRONG_FUNCS_IMPL(bits) \ + EASTL_ATOMIC_BASE_CMPXCHG_FUNCS_IMPL(compare_exchange_strong, CMPXCHG_STRONG, bits) + + +#define EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(bytes, bits) \ + template \ + struct atomic_base_width : public atomic_size_aligned \ + { \ + private: \ + \ + static_assert(EA_ALIGN_OF(atomic_size_aligned) == bytes, "eastl::atomic must be sizeof(T) aligned!"); \ + static_assert(EA_ALIGN_OF(atomic_size_aligned) == sizeof(T), "eastl::atomic must be sizeof(T) aligned!"); \ + using Base = atomic_size_aligned; \ + \ + public: /* ctors */ \ + \ + atomic_base_width(T desired) EA_NOEXCEPT \ + : Base{ desired } \ + { \ + } \ + \ + atomic_base_width() EA_NOEXCEPT = default; \ + \ + atomic_base_width(const atomic_base_width&) EA_NOEXCEPT = delete; \ + \ + public: /* store */ \ + \ + using Base::store; \ + \ + void store(T desired) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_SEQ_CST_, bits); \ + } \ + \ + void store(T desired, eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_RELAXED_, bits); \ + } \ + \ + void store(T desired, eastl::internal::memory_order_release_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_RELEASE_, bits); \ + } \ + \ + void store(T desired, eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_SEQ_CST_, bits); \ + } \ + \ + public: /* load */ \ + \ + using Base::load; \ + \ + T load() const EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_SEQ_CST_, bits); \ + } \ + \ + T load(eastl::internal::memory_order_relaxed_s) const EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_RELAXED_, bits); \ + } \ + \ + T load(eastl::internal::memory_order_acquire_s) const EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_ACQUIRE_, bits); \ + } \ + \ + T load(eastl::internal::memory_order_seq_cst_s) const EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_SEQ_CST_, bits); \ + } \ + \ + public: /* exchange */ \ + \ + using Base::exchange; \ + \ + T 
exchange(T desired) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_SEQ_CST_, bits); \ + } \ + \ + T exchange(T desired, eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_RELAXED_, bits); \ + } \ + \ + T exchange(T desired, eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_ACQUIRE_, bits); \ + } \ + \ + T exchange(T desired, eastl::internal::memory_order_release_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_RELEASE_, bits); \ + } \ + \ + T exchange(T desired, eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_ACQ_REL_, bits); \ + } \ + \ + T exchange(T desired, eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_SEQ_CST_, bits); \ + } \ + \ + public: /* compare_exchange_weak */ \ + \ + EASTL_ATOMIC_BASE_CMPXCHG_WEAK_FUNCS_IMPL(bits) \ + \ + public: /* compare_exchange_strong */ \ + \ + EASTL_ATOMIC_BASE_CMPXCHG_STRONG_FUNCS_IMPL(bits) \ + \ + public: /* assignment operator */ \ + \ + using Base::operator=; \ + \ + T operator =(T desired) EA_NOEXCEPT \ + { \ + store(desired, eastl::memory_order_seq_cst); \ + return desired; \ + } \ + \ + atomic_base_width& operator =(const atomic_base_width&) EA_NOEXCEPT = delete; \ + atomic_base_width& operator =(const atomic_base_width&) volatile EA_NOEXCEPT = delete; \ + \ + }; + + +#if defined(EASTL_ATOMIC_HAS_8BIT) + EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(1, 8) +#endif + +#if defined(EASTL_ATOMIC_HAS_16BIT) + EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(2, 16) +#endif + +#if defined(EASTL_ATOMIC_HAS_32BIT) + EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(4, 32) +#endif + +#if defined(EASTL_ATOMIC_HAS_64BIT) + EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(8, 64) +#endif + +#if defined(EASTL_ATOMIC_HAS_128BIT) + EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(16, 128) +#endif + + +} // namespace internal + + +} // namespace eastl + + +#include "atomic_pop_compiler_options.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_BASE_WIDTH_H */ diff --git a/include/EASTL/internal/atomic/atomic_casts.h b/include/EASTL/internal/atomic/atomic_casts.h new file mode 100644 index 00000000..303d4b05 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_casts.h @@ -0,0 +1,171 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_CASTS_H +#define EASTL_ATOMIC_INTERNAL_CASTS_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#include + + +namespace eastl +{ + + +namespace internal +{ + + +template +EASTL_FORCE_INLINE volatile T* AtomicVolatileCast(T* ptr) EA_NOEXCEPT +{ + static_assert(!eastl::is_volatile::value, "eastl::atomic : pointer must not be volatile, the pointed to type must be volatile!"); + static_assert(eastl::is_volatile::value, "eastl::atomic : the pointed to type must be volatile!"); + + return reinterpret_cast(ptr); +} + +/** + * NOTE: + * + * Some compiler intrinsics do not operate on pointer types thus + * doing atomic operations on pointers must be casted to the suitable + * sized unsigned integral type. + * + * Atomic operations on an int* might have to be casted to a uint64_t on + * a platform with 8-byte pointers as an example. 
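+ *
+ * For example (an illustrative sketch; 'ptr' and 'asIntegral' are hypothetical names,
+ * and this assumes a platform with 8-byte pointers), AtomicIntegralCast defined below
+ * reinterprets the pointer object as a same-sized unsigned integer for such intrinsics:
+ *
+ *     int* ptr;
+ *     uint64_t* asIntegral = AtomicIntegralCast<uint64_t>(&ptr);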
+ * + * Also doing an atomic operation on a struct, we must ensure that we observe + * the whole struct as one atomic unit with no shearing between the members. + * A load of a struct with two uint32_t members must be one uint64_t load, + * not two separate uint32_t loads. + */ +template +EASTL_FORCE_INLINE volatile Integral* AtomicVolatileIntegralCast(T* ptr) EA_NOEXCEPT +{ + static_assert(!eastl::is_volatile::value, "eastl::atomic : pointer must not be volatile, the pointed to type must be volatile!"); + static_assert(eastl::is_volatile::value, "eastl::atomic : the pointed to type must be volatile!"); + static_assert(eastl::is_integral::value, "eastl::atomic : Integral cast must cast to an Integral type!"); + static_assert(sizeof(Integral) == sizeof(T), "eastl::atomic : Integral and T must be same size for casting!"); + + return reinterpret_cast(ptr); +} + +template +EASTL_FORCE_INLINE Integral* AtomicIntegralCast(T* ptr) EA_NOEXCEPT +{ + static_assert(eastl::is_integral::value, "eastl::atomic : Integral cast must cast to an Integral type!"); + static_assert(sizeof(Integral) == sizeof(T), "eastl::atomic : Integral and T must be same size for casting!"); + + return reinterpret_cast(ptr); +} + + +/** + * NOTE: + * + * These casts are meant to be used with unions or structs of larger types that must be casted + * down to the smaller integral types. Like with 128-bit atomics and msvc intrinsics. + * + * struct Foo128 { __int64 array[2]; }; can be casted to a __int64* + * since a poiter to Foo128 is a pointer to the first member. + */ +template +EASTL_FORCE_INLINE volatile ToType* AtomicVolatileTypeCast(FromType* ptr) EA_NOEXCEPT +{ + static_assert(!eastl::is_volatile::value, "eastl::atomic : pointer must not be volatile, the pointed to type must be volatile!"); + static_assert(eastl::is_volatile::value, "eastl::atomic : the pointed to type must be volatile!"); + + return reinterpret_cast(ptr); +} + +template +EASTL_FORCE_INLINE ToType* AtomicTypeCast(FromType* ptr) EA_NOEXCEPT +{ + return reinterpret_cast(ptr); +} + + +/** + * NOTE: + * + * This is a compiler guaranteed safe type punning. + * This is useful when dealing with user defined structs. + * struct Test { uint32_t; unint32_t; }; + * + * Example: + * uint64_t atomicLoad = *((volatile uint64_t*)&Test); + * Test load = AtomicTypePunCast(atomicLoad); + * + * uint64_t comparand = AtomicTypePunCast(Test); + * cmpxchg(&Test, comparand, desired); + * + * This can be implemented in many different ways depending on the compiler such + * as thru a union, memcpy, reinterpret_cast(atomicLoad), etc. + */ +template +EASTL_FORCE_INLINE Pun AtomicTypePunCast(const T& fromType) EA_NOEXCEPT +{ + static_assert(sizeof(Pun) == sizeof(T), "eastl::atomic : Pun and T must be the same size for type punning!"); + + Pun ret; + memcpy(eastl::addressof(ret), eastl::addressof(fromType), sizeof(Pun)); + return ret; +} + +template +EASTL_FORCE_INLINE T AtomicNegateOperand(T val) EA_NOEXCEPT +{ + static_assert(eastl::is_integral::value, "eastl::atomic : Integral Negation must be an Integral type!"); + static_assert(!eastl::is_volatile::value, "eastl::atomic : T must not be volatile!"); + + return static_cast(0U - static_cast>(val)); +} + +EASTL_FORCE_INLINE ptrdiff_t AtomicNegateOperand(ptrdiff_t val) EA_NOEXCEPT +{ + return -val; +} + + +} // namespace internal + + +} // namespace eastl + + +/** + * NOTE: + * + * These macros are meant to prevent inclusion hell. + * Also so that it fits with the style of the rest of the atomic macro implementation. 
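+ *
+ * For example (an illustrative sketch; uint64_t here is just an example width), an
+ * implementation macro operating on a 64-bit object would wrap its pointer argument as
+ *
+ *     EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(uint64_t, (ptr))
+ *
+ * instead of naming the eastl::internal cast functions directly at each call site.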
+ */ +#define EASTL_ATOMIC_VOLATILE_CAST(ptr) \ + eastl::internal::AtomicVolatileCast((ptr)) + +#define EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(IntegralType, ptr) \ + eastl::internal::AtomicVolatileIntegralCast((ptr)) + +#define EASTL_ATOMIC_INTEGRAL_CAST(IntegralType, ptr) \ + eastl::internal::AtomicIntegralCast((ptr)) + +#define EASTL_ATOMIC_VOLATILE_TYPE_CAST(ToType, ptr) \ + eastl::internal::AtomicVolatileTypeCast((ptr)) + +#define EASTL_ATOMIC_TYPE_CAST(ToType, ptr) \ + eastl::internal::AtomicTypeCast((ptr)) + +#define EASTL_ATOMIC_TYPE_PUN_CAST(PunType, fromType) \ + eastl::internal::AtomicTypePunCast((fromType)) + +#define EASTL_ATOMIC_NEGATE_OPERAND(val) \ + eastl::internal::AtomicNegateOperand((val)) + + +#endif /* EASTL_ATOMIC_INTERNAL_CASTS_H */ diff --git a/include/EASTL/internal/atomic/atomic_flag.h b/include/EASTL/internal/atomic/atomic_flag.h new file mode 100644 index 00000000..f71b4def --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_flag.h @@ -0,0 +1,170 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNA_ATOMIC_FLAG_H +#define EASTL_ATOMIC_INTERNA_ATOMIC_FLAG_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#include "atomic_push_compiler_options.h" + + +namespace eastl +{ + + +class atomic_flag +{ +public: /* ctors */ + + atomic_flag(bool desired) + : mFlag{ desired } + { + } + + atomic_flag() EA_NOEXCEPT + : mFlag{ false } + { + } + +public: /* deleted ctors && assignment operators */ + + atomic_flag(const atomic_flag&) EA_NOEXCEPT = delete; + + atomic_flag& operator =(const atomic_flag&) EA_NOEXCEPT = delete; + atomic_flag& operator =(const atomic_flag&) volatile EA_NOEXCEPT = delete; + +public: /* clear */ + + template + void clear(Order order) volatile EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order); + } + + template + void clear(Order order) EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order); + } + + void clear(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT + { + mFlag.store(false, eastl::memory_order_relaxed); + } + + void clear(eastl::internal::memory_order_release_s) EA_NOEXCEPT + { + mFlag.store(false, eastl::memory_order_release); + } + + void clear(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT + { + mFlag.store(false, eastl::memory_order_seq_cst); + } + + void clear() EA_NOEXCEPT + { + mFlag.store(false, eastl::memory_order_seq_cst); + } + +public: /* test_and_set */ + + template + bool test_and_set(Order order) volatile EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order); + return false; + } + + template + bool test_and_set(Order order) EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order); + return false; + } + + bool test_and_set(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT + { + return mFlag.exchange(true, eastl::memory_order_relaxed); + } + + bool test_and_set(eastl::internal::memory_order_acquire_s) EA_NOEXCEPT + { + return mFlag.exchange(true, eastl::memory_order_acquire); + } + + bool test_and_set(eastl::internal::memory_order_release_s) EA_NOEXCEPT + { + return mFlag.exchange(true, eastl::memory_order_release); + } + + bool test_and_set(eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT + { + return mFlag.exchange(true, eastl::memory_order_acq_rel); + } + + bool test_and_set(eastl::internal::memory_order_seq_cst_s) 
EA_NOEXCEPT + { + return mFlag.exchange(true, eastl::memory_order_seq_cst); + } + + bool test_and_set() EA_NOEXCEPT + { + return mFlag.exchange(true, eastl::memory_order_seq_cst); + } + +public: /* test */ + + template + bool test(Order order) const volatile EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order); + return false; + } + + template + bool test(Order order) const EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order); + return false; + } + + bool test(eastl::internal::memory_order_relaxed_s) const EA_NOEXCEPT + { + return mFlag.load(eastl::memory_order_relaxed); + } + + bool test(eastl::internal::memory_order_acquire_s) const EA_NOEXCEPT + { + return mFlag.load(eastl::memory_order_acquire); + } + + bool test(eastl::internal::memory_order_seq_cst_s) const EA_NOEXCEPT + { + return mFlag.load(eastl::memory_order_seq_cst); + } + + bool test() const EA_NOEXCEPT + { + return mFlag.load(eastl::memory_order_seq_cst); + } + +private: + + eastl::atomic mFlag; +}; + + +} // namespace eastl + + +#include "atomic_pop_compiler_options.h" + + +#endif /* EASTL_ATOMIC_INTERNA_ATOMIC_FLAG_H */ diff --git a/include/EASTL/internal/atomic/atomic_flag_standalone.h b/include/EASTL/internal/atomic/atomic_flag_standalone.h new file mode 100644 index 00000000..b5284bed --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_flag_standalone.h @@ -0,0 +1,69 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_FLAG_STANDALONE_H +#define EASTL_ATOMIC_INTERNAL_FLAG_STANDALONE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +namespace eastl +{ + + +//////////////////////////////////////////////////////////////////////////////// +// +// bool atomic_flag_test_and_set(eastl::atomic*) +// +EASTL_FORCE_INLINE bool atomic_flag_test_and_set(eastl::atomic_flag* atomicObj) EA_NOEXCEPT +{ + return atomicObj->test_and_set(); +} + +template +EASTL_FORCE_INLINE bool atomic_flag_test_and_set_explicit(eastl::atomic_flag* atomicObj, Order order) +{ + return atomicObj->test_and_set(order); +} + + +//////////////////////////////////////////////////////////////////////////////// +// +// bool atomic_flag_clear(eastl::atomic*) +// +EASTL_FORCE_INLINE void atomic_flag_clear(eastl::atomic_flag* atomicObj) +{ + atomicObj->clear(); +} + +template +EASTL_FORCE_INLINE void atomic_flag_clear_explicit(eastl::atomic_flag* atomicObj, Order order) +{ + atomicObj->clear(order); +} + + +//////////////////////////////////////////////////////////////////////////////// +// +// bool atomic_flag_test(eastl::atomic*) +// +EASTL_FORCE_INLINE bool atomic_flag_test(eastl::atomic_flag* atomicObj) +{ + return atomicObj->test(); +} + +template +EASTL_FORCE_INLINE bool atomic_flag_test_explicit(eastl::atomic_flag* atomicObj, Order order) +{ + return atomicObj->test(order); +} + + +} // namespace eastl + + +#endif /* EASTL_ATOMIC_INTERNAL_FLAG_STANDALONE_H */ diff --git a/include/EASTL/internal/atomic/atomic_integral.h b/include/EASTL/internal/atomic/atomic_integral.h new file mode 100644 index 00000000..c1414446 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_integral.h @@ -0,0 +1,343 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_INTEGRAL_H +#define EASTL_ATOMIC_INTERNAL_INTEGRAL_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#include "atomic_push_compiler_options.h" + + +namespace eastl +{ + + +namespace internal +{ + + +#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(funcName) \ + template \ + T funcName(T arg, Order order) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \ + } \ + \ + template \ + T funcName(T arg, Order order) volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + } \ + \ + T funcName(T arg) volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + } + + +#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(operatorOp) \ + T operator operatorOp() volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + } \ + \ + T operator operatorOp(int) volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + } + + +#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(operatorOp) \ + T operator operatorOp(T arg) volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + } + + + template + struct atomic_integral_base : public atomic_base_width + { + private: + + using Base = atomic_base_width; + + public: /* ctors */ + + atomic_integral_base(T desired) EA_NOEXCEPT + : Base{ desired } + { + } + + atomic_integral_base() EA_NOEXCEPT = default; + + atomic_integral_base(const atomic_integral_base&) EA_NOEXCEPT = delete; + + public: /* assignment operator */ + + using Base::operator =; + + atomic_integral_base& operator =(const atomic_integral_base&) EA_NOEXCEPT = delete; + atomic_integral_base& operator =(const atomic_integral_base&) volatile EA_NOEXCEPT = delete; + + public: /* fetch_add */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_add) + + public: /* add_fetch */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(add_fetch) + + public: /* fetch_sub */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_sub) + + public: /* sub_fetch */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(sub_fetch) + + public: /* fetch_and */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_and) + + public: /* and_fetch */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(and_fetch) + + public: /* fetch_or */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_or) + + public: /* or_fetch */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(or_fetch) + + public: /* fetch_xor */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_xor) + + public: /* xor_fetch */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(xor_fetch) + + public: /* operator++ && operator-- */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(++) + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(--) + + public: /* operator+= && operator-= */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(+=) + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(-=) + + public: /* operator&= */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(&=) + + public: /* operator|= */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(|=) + + public: /* operator^= */ + + EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(^=) + + }; + + + template + struct atomic_integral_width; + +#define EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits) \ + T 
retVal; \ + EA_PREPROCESSOR_JOIN(op, bits)(T, retVal, this->GetAtomicAddress(), arg); \ + return retVal; + +#define EASTL_ATOMIC_INTEGRAL_FETCH_IMPL(funcName, op, bits) \ + T funcName(T arg) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits); \ + } + +#define EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, orderType, op, bits) \ + T funcName(T arg, orderType) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits); \ + } + +#define EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, Order) \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, fetchOp), Order) + +#define EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(funcName, fetchOp, bits) \ + using Base::funcName; \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_IMPL(funcName, EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits) \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_relaxed_s, \ + EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _RELAXED_), bits) \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acquire_s, \ + EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _ACQUIRE_), bits) \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_release_s, \ + EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _RELEASE_), bits) \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acq_rel_s, \ + EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _ACQ_REL_), bits) \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_seq_cst_s, \ + EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits) + +#define EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(operatorOp, preFuncName, postFuncName) \ + using Base::operator operatorOp; \ + \ + T operator operatorOp() EA_NOEXCEPT \ + { \ + return preFuncName(1, eastl::memory_order_seq_cst); \ + } \ + \ + T operator operatorOp(int) EA_NOEXCEPT \ + { \ + return postFuncName(1, eastl::memory_order_seq_cst); \ + } + +#define EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(operatorOp, funcName) \ + using Base::operator operatorOp; \ + \ + T operator operatorOp(T arg) EA_NOEXCEPT \ + { \ + return funcName(arg, eastl::memory_order_seq_cst); \ + } + + +#define EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(bytes, bits) \ + template \ + struct atomic_integral_width : public atomic_integral_base \ + { \ + private: \ + \ + using Base = atomic_integral_base; \ + \ + public: /* ctors */ \ + \ + atomic_integral_width(T desired) EA_NOEXCEPT \ + : Base{ desired } \ + { \ + } \ + \ + atomic_integral_width() EA_NOEXCEPT = default; \ + \ + atomic_integral_width(const atomic_integral_width&) EA_NOEXCEPT = delete; \ + \ + public: /* assignment operator */ \ + \ + using Base::operator =; \ + \ + atomic_integral_width& operator =(const atomic_integral_width&) EA_NOEXCEPT = delete; \ + atomic_integral_width& operator =(const atomic_integral_width&) volatile EA_NOEXCEPT = delete; \ + \ + public: /* fetch_add */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_add, FETCH_ADD, bits) \ + \ + public: /* add_fetch */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(add_fetch, ADD_FETCH, bits) \ + \ + public: /* fetch_sub */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_sub, FETCH_SUB, bits) \ + \ + public: /* sub_fetch */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(sub_fetch, SUB_FETCH, bits) \ + \ + public: /* fetch_and */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_and, FETCH_AND, bits) \ + \ + public: /* and_fetch */ \ + \ + 
EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(and_fetch, AND_FETCH, bits) \ + \ + public: /* fetch_or */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_or, FETCH_OR, bits) \ + \ + public: /* or_fetch */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(or_fetch, OR_FETCH, bits) \ + \ + public: /* fetch_xor */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_xor, FETCH_XOR, bits) \ + \ + public: /* xor_fetch */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(xor_fetch, XOR_FETCH, bits) \ + \ + public: /* operator++ && operator-- */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(++, add_fetch, fetch_add) \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(--, sub_fetch, fetch_sub) \ + \ + public: /* operator+= && operator-= */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(+=, add_fetch) \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(-=, sub_fetch) \ + \ + public: /* operator&= */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(&=, and_fetch) \ + \ + public: /* operator|= */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(|=, or_fetch) \ + \ + public: /* operator^= */ \ + \ + EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(^=, xor_fetch) \ + \ + }; + + +#if defined(EASTL_ATOMIC_HAS_8BIT) + EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(1, 8) +#endif + +#if defined(EASTL_ATOMIC_HAS_16BIT) + EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(2, 16) +#endif + +#if defined(EASTL_ATOMIC_HAS_32BIT) + EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(4, 32) +#endif + +#if defined(EASTL_ATOMIC_HAS_64BIT) + EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(8, 64) +#endif + +#if defined(EASTL_ATOMIC_HAS_128BIT) + EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(16, 128) +#endif + + +} // namespace internal + + +} // namespace eastl + + +#include "atomic_pop_compiler_options.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_INTEGRAL_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros.h b/include/EASTL/internal/atomic/atomic_macros.h new file mode 100644 index 00000000..8bdcc330 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros.h @@ -0,0 +1,61 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_H +#define EASTL_ATOMIC_INTERNAL_MACROS_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// The reason for the implementation separating out into a compiler and architecture +// folder is as follows. +// +// The compiler directory is meant to implement atomics using the compiler provided +// intrinsics. This also implies that usually the same compiler instrinsic implementation +// can be used for any architecture the compiler supports. If a compiler provides intrinsics +// to support barriers or atomic operations, then that implementation should be in the +// compiler directory. +// +// The arch directory is meant to manually implement atomics for a specific architecture +// such as power or x86. There may be some compiler specific code in this directory because +// GCC inline assembly syntax may be different than another compiler as an example. 
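+//
+// As a concrete case, the x86 arch headers earlier in this patch supply the 128-bit
+// operations, such as EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128, where the compiler cannot
+// be relied upon to provide an efficient 128-bit implementation on its own.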
+// +// The arch directory can also be used to implement some atomic operations ourselves +// if we deem the compiler provided implementation to be inefficient for the given +// architecture or we need to do some things manually for a given compiler. +// +// The atomic_macros directory implements the macros that the rest of the atomic +// library uses. These macros will expand to either the compiler or arch implemented +// macro. The arch implemented macro is given priority over the compiler implemented +// macro if both are implemented otherwise whichever is implemented is chosen or +// an error is emitted if none are implemented. +// +// The implementation being all macros has a couple nice side effects as well. +// +// 1. All the implementation ends up funneling into one low level macro implementation +// which makes it easy to verify correctness, reduce copy-paste errors and differences +// in various platform implementations. +// +// 2. Allows for the implementation to be implemented efficiently on compilers that do not +// directly implement the C++ memory model such as msvc. +// +// 3. Allows for the implementation of atomics that may not be supported on the given platform, +// such as 128-bit atomics on 32-bit platforms since the macros will only ever be expanded +// on platforms that support said features. This makes implementing said features pretty easy +// since we do not have to worry about complicated feature detection in the low level implementations. +// + + +#include "compiler/compiler.h" +#include "arch/arch.h" + +#include "atomic_macros/atomic_macros.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h new file mode 100644 index 00000000..d1defe9a --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h @@ -0,0 +1,87 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_ATOMIC_MACROS_H +#define EASTL_ATOMIC_INTERNAL_ATOMIC_MACROS_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#include "atomic_macros_base.h" + +#include "atomic_macros_fetch_add.h" +#include "atomic_macros_fetch_sub.h" + +#include "atomic_macros_fetch_and.h" +#include "atomic_macros_fetch_xor.h" +#include "atomic_macros_fetch_or.h" + +#include "atomic_macros_add_fetch.h" +#include "atomic_macros_sub_fetch.h" + +#include "atomic_macros_and_fetch.h" +#include "atomic_macros_xor_fetch.h" +#include "atomic_macros_or_fetch.h" + +#include "atomic_macros_exchange.h" + +#include "atomic_macros_cmpxchg_weak.h" +#include "atomic_macros_cmpxchg_strong.h" + +#include "atomic_macros_load.h" +#include "atomic_macros_store.h" + +#include "atomic_macros_compiler_barrier.h" + +#include "atomic_macros_cpu_pause.h" + +#include "atomic_macros_memory_barrier.h" + +#include "atomic_macros_signal_fence.h" + +#include "atomic_macros_thread_fence.h" + + +///////////////////////////////////////////////////////////////////////////////// + + +#if defined(EASTL_COMPILER_ATOMIC_HAS_8BIT) || defined(EASTL_ARCH_ATOMIC_HAS_8BIT) + + #define EASTL_ATOMIC_HAS_8BIT + +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_HAS_16BIT) || defined(EASTL_ARCH_ATOMIC_HAS_16BIT) + + #define EASTL_ATOMIC_HAS_16BIT + +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_HAS_32BIT) || defined(EASTL_ARCH_ATOMIC_HAS_32BIT) + + #define EASTL_ATOMIC_HAS_32BIT + +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_HAS_64BIT) || defined(EASTL_ARCH_ATOMIC_HAS_64BIT) + + #define EASTL_ATOMIC_HAS_64BIT + +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_HAS_128BIT) || defined(EASTL_ARCH_ATOMIC_HAS_128BIT) + + #define EASTL_ATOMIC_HAS_128BIT + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_ATOMIC_MACROS_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h new file mode 100644 index 00000000..f551a07c --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_ADD_FETCH_H +#define EASTL_ATOMIC_INTERNAL_MACROS_ADD_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_ADD_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_ADD_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_128)(type, ret, ptr, val) + +#define 
EASTL_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_ADD_FETCH_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h new file mode 100644 index 00000000..69127223 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_AND_FETCH_H +#define EASTL_ATOMIC_INTERNAL_MACROS_AND_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_AND_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_AND_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_AND_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_AND_FETCH_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h new file mode 100644 index 00000000..b9055d1b --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h @@ -0,0 +1,65 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_BASE_H +#define EASTL_ATOMIC_INTERNAL_MACROS_BASE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_ATOMIC_INTERNAL_COMPILER_AVAILABLE(op) \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_COMPILER_, op), _AVAILABLE) + +#define EASTL_ATOMIC_INTERNAL_ARCH_AVAILABLE(op) \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ARCH_, op), _AVAILABLE) + +#define EASTL_ATOMIC_INTERNAL_NOT_IMPLEMENTED_ERROR(...) 
\ + static_assert(false, "eastl::atomic atomic macro not implemented!") + + +/* Compiler && Arch Not Implemented */ +#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_00(op) \ + EASTL_ATOMIC_INTERNAL_NOT_IMPLEMENTED_ERROR + +/* Arch Implemented */ +#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_01(op) \ + EA_PREPROCESSOR_JOIN(EASTL_ARCH_, op) + +/* Compiler Implmented */ +#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_10(op) \ + EA_PREPROCESSOR_JOIN(EASTL_COMPILER_, op) + +/* Compiler && Arch Implemented */ +#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_11(op) \ + EA_PREPROCESSOR_JOIN(EASTL_ARCH_, op) + + +/* This macro creates the pattern macros above for the 2x2 True-False truth table */ +#define EASTL_ATOMIC_INTERNAL_OP_HELPER1(compiler, arch, op) \ + EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_INTERNAL_OP_PATTERN_, EA_PREPROCESSOR_JOIN(compiler, arch))(op) + + +///////////////////////////////////////////////////////////////////////////////// +// +// EASTL_ATOMIC_CHOOSE_OP_IMPL +// +// This macro chooses between the compiler or architecture implementation for a +// given atomic operation +// +// USAGE: +// +// EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_8)(ret, ptr, val) +// +#define EASTL_ATOMIC_CHOOSE_OP_IMPL(op) \ + EASTL_ATOMIC_INTERNAL_OP_HELPER1( \ + EASTL_ATOMIC_INTERNAL_COMPILER_AVAILABLE(op), \ + EASTL_ATOMIC_INTERNAL_ARCH_AVAILABLE(op), \ + op \ + ) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_BASE_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h new file mode 100644 index 00000000..3cff4935 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h @@ -0,0 +1,245 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_STRONG_H +#define EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_STRONG_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, 
desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64)(type, ret, ptr, expected, 
desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128)(type, ret, ptr, expected, desired) + + +///////////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_CMPXCHG_STRONG_*(bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8(type, ret, ptr, expected, desired) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_8)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_16)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_32)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_64)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128(type, ret, ptr, expected, desired) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_128)(type, ret, ptr, expected, desired) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_STRONG_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h new file mode 100644 index 00000000..60ea8b0b --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h @@ -0,0 +1,245 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_WEAK_H +#define EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_WEAK_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, 
desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128)(type, ret, ptr, expected, desired) + + +///////////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_CMPXCHG_WEAK_*(bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_8)(type, ret, ptr, expected, 
desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_8)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_8)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_16)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_16)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_32)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_32)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_64)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_64)(type, ret, ptr, expected, desired) + + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_128(type, ret, ptr, expected, desired) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_128)(type, ret, ptr, expected, desired) + +#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_128)(type, ret, ptr, expected, desired) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_WEAK_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h new file mode 100644 index 00000000..96ea6d0b --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h @@ -0,0 +1,30 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_COMPILER_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_MACROS_COMPILER_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_COMPILER_BARRIER() +// +#define EASTL_ATOMIC_COMPILER_BARRIER() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_COMPILER_BARRIER)() + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type) +// +#define EASTL_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, type) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY)(val, type) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_COMPILER_BARRIER_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h new file mode 100644 index 00000000..e027b576 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h @@ -0,0 +1,22 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
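Per the signature comments above, the cmpxchg_strong/cmpxchg_weak macros report success through the bool ret argument and, following the standard compare-exchange contract, write the observed value back through the expected pointer on failure, so they drop straight into the usual retry loop. A hypothetical sketch (pValue and the increment scenario are invented here; user code would normally go through eastl::atomic<T> rather than these internal macros):

    // Hypothetical: atomically increment the uint32_t at pValue with the weak
    // compare-exchange macro; on failure the macro refreshes 'expected' with
    // the value it actually observed and the loop retries.
    uint32_t expected  = 0;
    bool     exchanged = false;
    while (!exchanged)
    {
        const uint32_t desired = expected + 1;
        EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(uint32_t, exchanged, pValue, &expected, desired);
    }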
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_CPU_PAUSE_H +#define EASTL_ATOMIC_INTERNAL_MACROS_CPU_PAUSE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_CPU_PAUSE() +// +#define EASTL_ATOMIC_CPU_PAUSE() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_PAUSE)() + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_CPU_PAUSE_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h new file mode 100644 index 00000000..0681318f --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_EXCHANGE_H +#define EASTL_ATOMIC_INTERNAL_MACROS_EXCHANGE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_EXCHANGE_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_EXCHANGE_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_EXCHANGE_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_EXCHANGE_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h new file mode 100644 index 00000000..701fdf37 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
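The exchange and CPU-pause macros above compose into the classic test-and-set spin loop, which is the kind of place EASTL_ATOMIC_CPU_PAUSE is meant for. A hypothetical sketch (pLock is an invented uint32_t* to zero-initialized storage; real code would reach for eastl::atomic_flag or a proper mutex):

    // Hypothetical: acquire a test-and-set spin lock where 1 means held.
    uint32_t previous = 1;
    for (;;)
    {
        EASTL_ATOMIC_EXCHANGE_ACQUIRE_32(uint32_t, previous, pLock, 1u);
        if (previous == 0)
            break;                  // the lock was free and is now ours
        EASTL_ATOMIC_CPU_PAUSE();   // back off politely while spinning
    }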
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_ADD_H +#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_ADD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_FETCH_ADD_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_ADD_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_ADD_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_128)(type, ret, ptr, val) + +#define 
EASTL_ATOMIC_FETCH_ADD_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_ADD_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h new file mode 100644 index 00000000..831f1bfe --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_AND_H +#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_AND_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_FETCH_AND_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_AND_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_AND_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_32(type, ret, ptr, val) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_AND_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_AND_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h new file mode 100644 index 00000000..b1322970 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
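Following the usual fetch-and-add convention, ret receives the value that was stored immediately before the addition. A hypothetical sketch (pRefCount is an invented uint32_t*; eastl::atomic<uint32_t>::fetch_add is the user-facing way to do this):

    // Hypothetical: bump a reference count and observe the previous value.
    uint32_t previousCount;
    EASTL_ATOMIC_FETCH_ADD_RELAXED_32(uint32_t, previousCount, pRefCount, 1u);
    // The stored value is now previousCount + 1.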
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_OR_H +#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_OR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_FETCH_OR_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_OR_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_OR_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_OR_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_128(type, ret, ptr, val) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_OR_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h new file mode 100644 index 00000000..00980643 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_SUB_H +#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_SUB_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_FETCH_SUB_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_SUB_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_SUB_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_32)(type, ret, ptr, val) + +#define 
EASTL_ATOMIC_FETCH_SUB_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_SUB_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_SUB_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h new file mode 100644 index 00000000..2887ea56 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_XOR_H +#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_XOR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_FETCH_XOR_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_XOR_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_XOR_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_FETCH_XOR_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_128)(type, ret, ptr, val) + +#define 
EASTL_ATOMIC_FETCH_XOR_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_XOR_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h new file mode 100644 index 00000000..76580593 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h @@ -0,0 +1,75 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_LOAD_H +#define EASTL_ATOMIC_INTERNAL_MACROS_LOAD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_LOAD_*_N(type, type ret, type * ptr) +// +#define EASTL_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_8)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_8)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_8)(type, ret, ptr) + + +#define EASTL_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_16)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_16)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_16)(type, ret, ptr) + + +#define EASTL_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_32)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_32)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_32)(type, ret, ptr) + + +#define EASTL_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_64)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_64)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_64)(type, ret, ptr) + + +#define EASTL_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_128)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_128)(type, ret, ptr) + +#define EASTL_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_128)(type, ret, ptr) + + +#define EASTL_ATOMIC_LOAD_READ_DEPENDS_32(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_READ_DEPENDS_32)(type, ret, ptr) + 
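Every wrapper in these headers expands through EASTL_ATOMIC_CHOOSE_OP_IMPL, which dispatches the pasted operation token to the selected backend implementation, so call sites only spell out the memory order and the operand width. The READ_DEPENDS variants here and just below are presumably the dependency-ordered (consume-style) loads and are only provided in 32- and 64-bit widths. A hypothetical sketch of an ordinary acquire load (pValue is invented):

    // Hypothetical: acquire-load the uint32_t at pValue through the macro layer.
    uint32_t observed;
    EASTL_ATOMIC_LOAD_ACQUIRE_32(uint32_t, observed, pValue);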
+#define EASTL_ATOMIC_LOAD_READ_DEPENDS_64(type, ret, ptr) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_READ_DEPENDS_64)(type, ret, ptr) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_LOAD_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h new file mode 100644 index 00000000..9a7e818c --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h @@ -0,0 +1,38 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_MEMORY_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_MACROS_MEMORY_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) +#pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_CPU_MB() +// +#define EASTL_ATOMIC_CPU_MB() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_MB)() + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_CPU_WMB() +// +#define EASTL_ATOMIC_CPU_WMB() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_WMB)() + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_CPU_RMB() +// +#define EASTL_ATOMIC_CPU_RMB() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_RMB)() + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_MEMORY_BARRIER_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h new file mode 100644 index 00000000..f3df54ec --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h @@ -0,0 +1,97 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_OR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_MACROS_OR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_OR_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_OR_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_OR_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_OR_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + 
EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_OR_FETCH_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h new file mode 100644 index 00000000..dd16b106 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h @@ -0,0 +1,34 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_SIGNAL_FENCE_H +#define EASTL_ATOMIC_INTERNAL_MACROS_SIGNAL_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_SIGNAL_FENCE_*() +// +#define EASTL_ATOMIC_SIGNAL_FENCE_RELAXED() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_RELAXED)() + +#define EASTL_ATOMIC_SIGNAL_FENCE_ACQUIRE() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_ACQUIRE)() + +#define EASTL_ATOMIC_SIGNAL_FENCE_RELEASE() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_RELEASE)() + +#define EASTL_ATOMIC_SIGNAL_FENCE_ACQ_REL() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_ACQ_REL)() + +#define EASTL_ATOMIC_SIGNAL_FENCE_SEQ_CST() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_SEQ_CST)() + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_SIGNAL_FENCE_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h new file mode 100644 index 00000000..64b662e1 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h @@ -0,0 +1,68 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_STORE_H +#define EASTL_ATOMIC_INTERNAL_MACROS_STORE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_STORE_*_N(type, type * ptr, type val) +// +#define EASTL_ATOMIC_STORE_RELAXED_8(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_8)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_RELEASE_8(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_8)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_8)(type, ptr, val) + + +#define EASTL_ATOMIC_STORE_RELAXED_16(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_16)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_RELEASE_16(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_16)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_16)(type, ptr, val) + + +#define EASTL_ATOMIC_STORE_RELAXED_32(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_32)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_RELEASE_32(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_32)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_32)(type, ptr, val) + + +#define EASTL_ATOMIC_STORE_RELAXED_64(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_64)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_RELEASE_64(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_64)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_64)(type, ptr, val) + + +#define EASTL_ATOMIC_STORE_RELAXED_128(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_128)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_RELEASE_128(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_128)(type, ptr, val) + +#define EASTL_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_128)(type, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_STORE_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h new file mode 100644 index 00000000..330f38e9 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_SUB_FETCH_H +#define EASTL_ATOMIC_INTERNAL_MACROS_SUB_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_SUB_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_SUB_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_SUB_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_SUB_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_128)(type, ret, ptr, val) + +#define 
EASTL_ATOMIC_SUB_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_SUB_FETCH_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h new file mode 100644 index 00000000..26492c59 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h @@ -0,0 +1,34 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_THREAD_FENCE_H +#define EASTL_ATOMIC_INTERNAL_MACROS_THREAD_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_THREAD_FENCE_*() +// +#define EASTL_ATOMIC_THREAD_FENCE_RELAXED() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_RELAXED)() + +#define EASTL_ATOMIC_THREAD_FENCE_ACQUIRE() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_ACQUIRE)() + +#define EASTL_ATOMIC_THREAD_FENCE_RELEASE() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_RELEASE)() + +#define EASTL_ATOMIC_THREAD_FENCE_ACQ_REL() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_ACQ_REL)() + +#define EASTL_ATOMIC_THREAD_FENCE_SEQ_CST() \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_SEQ_CST)() + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_THREAD_FENCE_H */ diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h new file mode 100644 index 00000000..42276470 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h @@ -0,0 +1,98 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MACROS_XOR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_MACROS_XOR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_ATOMIC_XOR_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_8)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_8)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_XOR_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_16)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_16)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_XOR_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_32)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_32)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_64)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_64)(type, ret, ptr, val) + + +#define EASTL_ATOMIC_XOR_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_128)(type, ret, ptr, val) + +#define 
EASTL_ATOMIC_XOR_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_128)(type, ret, ptr, val) + +#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_128)(type, ret, ptr, val) + + +#endif /* EASTL_ATOMIC_INTERNAL_MACROS_XOR_FETCH_H */ diff --git a/include/EASTL/internal/atomic/atomic_memory_order.h b/include/EASTL/internal/atomic/atomic_memory_order.h new file mode 100644 index 00000000..b1c14035 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_memory_order.h @@ -0,0 +1,44 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_MEMORY_ORDER_H +#define EASTL_ATOMIC_INTERNAL_MEMORY_ORDER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +namespace eastl +{ + + +namespace internal +{ + + +struct memory_order_relaxed_s {}; +struct memory_order_read_depends_s {}; +struct memory_order_acquire_s {}; +struct memory_order_release_s {}; +struct memory_order_acq_rel_s {}; +struct memory_order_seq_cst_s {}; + + +} // namespace internal + + +EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_relaxed = internal::memory_order_relaxed_s{}; +EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_read_depends = internal::memory_order_read_depends_s{}; +EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_acquire = internal::memory_order_acquire_s{}; +EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_release = internal::memory_order_release_s{}; +EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_acq_rel = internal::memory_order_acq_rel_s{}; +EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_seq_cst = internal::memory_order_seq_cst_s{}; + + +} // namespace eastl + + +#endif /* EASTL_ATOMIC_INTERNAL_MEMORY_ORDER_H */ diff --git a/include/EASTL/internal/atomic/atomic_pointer.h b/include/EASTL/internal/atomic/atomic_pointer.h new file mode 100644 index 00000000..93482791 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_pointer.h @@ -0,0 +1,277 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_POINTER_H +#define EASTL_ATOMIC_INTERNAL_POINTER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#include "atomic_push_compiler_options.h" + + +namespace eastl +{ + + +namespace internal +{ + + + template + struct atomic_pointer_base; + +#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(funcName) \ + template \ + T* funcName(ptrdiff_t arg, Order order) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \ + } \ + \ + template \ + T* funcName(ptrdiff_t arg, Order order) volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + } \ + \ + T* funcName(ptrdiff_t arg) volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + } + +#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(operatorOp) \ + T* operator operatorOp() volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + } \ + \ + T* operator operatorOp(int) volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + } + +#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(operatorOp) \ + T* operator operatorOp(ptrdiff_t arg) volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + } + + + template + struct atomic_pointer_base : public atomic_base_width + { + private: + + using Base = atomic_base_width; + + public: /* ctors */ + + atomic_pointer_base(T* desired) EA_NOEXCEPT + : Base{ desired } + { + } + + atomic_pointer_base() EA_NOEXCEPT = default; + + public: /* assignment operators */ + + using Base::operator =; + + atomic_pointer_base& operator =(const atomic_pointer_base&) EA_NOEXCEPT = delete; + atomic_pointer_base& operator =(const atomic_pointer_base&) volatile EA_NOEXCEPT = delete; + + public: /* fetch_add */ + + EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(fetch_add) + + public: /* add_fetch */ + + EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(add_fetch) + + public: /* fetch_sub */ + + EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(fetch_sub) + + public: /* sub_fetch */ + + EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(sub_fetch) + + public: /* operator++ && operator-- */ + + EASTL_ATOMIC_POINTER_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(++) + + EASTL_ATOMIC_POINTER_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(--) + + public: /* operator+= && operator-= */ + + EASTL_ATOMIC_POINTER_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(+=) + + EASTL_ATOMIC_POINTER_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(-=) + + }; + + + template + struct atomic_pointer_width; + +#define EASTL_ATOMIC_POINTER_FUNC_IMPL(op, bits) \ + T* retVal; \ + { \ + ptr_integral_type retType; \ + ptr_integral_type addend = static_cast(arg) * static_cast(sizeof(T)); \ + \ + EA_PREPROCESSOR_JOIN(op, bits)(ptr_integral_type, retType, EASTL_ATOMIC_INTEGRAL_CAST(ptr_integral_type, this->GetAtomicAddress()), addend); \ + \ + retVal = reinterpret_cast(retType); \ + } \ + return retVal; + +#define EASTL_ATOMIC_POINTER_FETCH_IMPL(funcName, op, bits) \ + T* funcName(ptrdiff_t arg) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_TYPE_IS_OBJECT(T); \ + EASTL_ATOMIC_POINTER_FUNC_IMPL(op, bits); \ + } + +#define EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, orderType, op, bits) \ + T* funcName(ptrdiff_t arg, orderType) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_TYPE_IS_OBJECT(T); \ + EASTL_ATOMIC_POINTER_FUNC_IMPL(op, bits); \ + } + +#define EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, 
Order) \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, fetchOp), Order) + +#define EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(funcName, fetchOp, bits) \ + using Base::funcName; \ + \ + EASTL_ATOMIC_POINTER_FETCH_IMPL(funcName, EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits) \ + \ + EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_relaxed_s, \ + EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _RELAXED_), bits) \ + \ + EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acquire_s, \ + EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _ACQUIRE_), bits) \ + \ + EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_release_s, \ + EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _RELEASE_), bits) \ + \ + EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acq_rel_s, \ + EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _ACQ_REL_), bits) \ + \ + EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_seq_cst_s, \ + EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits) + +#define EASTL_ATOMIC_POINTER_FETCH_INC_DEC_OPERATOR_IMPL(operatorOp, preFuncName, postFuncName) \ + using Base::operator operatorOp; \ + \ + T* operator operatorOp() EA_NOEXCEPT \ + { \ + return preFuncName(1, eastl::memory_order_seq_cst); \ + } \ + \ + T* operator operatorOp(int) EA_NOEXCEPT \ + { \ + return postFuncName(1, eastl::memory_order_seq_cst); \ + } + +#define EASTL_ATOMIC_POINTER_FETCH_ASSIGNMENT_OPERATOR_IMPL(operatorOp, funcName) \ + using Base::operator operatorOp; \ + \ + T* operator operatorOp(ptrdiff_t arg) EA_NOEXCEPT \ + { \ + return funcName(arg, eastl::memory_order_seq_cst); \ + } + + +#define EASTL_ATOMIC_POINTER_WIDTH_SPECIALIZE(bytes, bits) \ + template \ + struct atomic_pointer_width : public atomic_pointer_base \ + { \ + private: \ + \ + using Base = atomic_pointer_base; \ + using u_ptr_integral_type = EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(uint, bits), _t); \ + using ptr_integral_type = EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(int, bits), _t); \ + \ + public: /* ctors */ \ + \ + atomic_pointer_width(T* desired) EA_NOEXCEPT \ + : Base{ desired } \ + { \ + } \ + \ + atomic_pointer_width() EA_NOEXCEPT = default; \ + \ + public: /* assignment operators */ \ + \ + using Base::operator =; \ + \ + atomic_pointer_width& operator =(const atomic_pointer_width&) EA_NOEXCEPT = delete; \ + atomic_pointer_width& operator =(const atomic_pointer_width&) volatile EA_NOEXCEPT = delete; \ + \ + public: /* fetch_add */ \ + \ + EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(fetch_add, FETCH_ADD, bits) \ + \ + public: /* add_fetch */ \ + \ + EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(add_fetch, ADD_FETCH, bits) \ + \ + public: /* fetch_sub */ \ + \ + EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(fetch_sub, FETCH_SUB, bits) \ + \ + public: /* sub_fetch */ \ + \ + EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(sub_fetch, SUB_FETCH, bits) \ + \ + public: /* operator++ && operator-- */ \ + \ + EASTL_ATOMIC_POINTER_FETCH_INC_DEC_OPERATOR_IMPL(++, add_fetch, fetch_add) \ + \ + EASTL_ATOMIC_POINTER_FETCH_INC_DEC_OPERATOR_IMPL(--, sub_fetch, fetch_sub) \ + \ + public: /* operator+= && operator-= */ \ + \ + EASTL_ATOMIC_POINTER_FETCH_ASSIGNMENT_OPERATOR_IMPL(+=, add_fetch) \ + \ + EASTL_ATOMIC_POINTER_FETCH_ASSIGNMENT_OPERATOR_IMPL(-=, sub_fetch) \ + \ + public: \ + \ + using Base::load; \ + \ + T* load(eastl::internal::memory_order_read_depends_s) EA_NOEXCEPT \ + { \ + T* retPointer; \ + 
EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_LOAD_READ_DEPENDS_, bits)(T*, retPointer, this->GetAtomicAddress()); \ + return retPointer; \ + } \ + }; + + +#if defined(EASTL_ATOMIC_HAS_32BIT) && EA_PLATFORM_PTR_SIZE == 4 + EASTL_ATOMIC_POINTER_WIDTH_SPECIALIZE(4, 32) +#endif + +#if defined(EASTL_ATOMIC_HAS_64BIT) && EA_PLATFORM_PTR_SIZE == 8 + EASTL_ATOMIC_POINTER_WIDTH_SPECIALIZE(8, 64) +#endif + + +} // namespace internal + + +} // namespace eastl + + +#include "atomic_pop_compiler_options.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_POINTER_H */ diff --git a/test/source/TestSparseMatrix.cpp b/include/EASTL/internal/atomic/atomic_pop_compiler_options.h similarity index 60% rename from test/source/TestSparseMatrix.cpp rename to include/EASTL/internal/atomic/atomic_pop_compiler_options.h index d8b6eff7..92f241a1 100644 --- a/test/source/TestSparseMatrix.cpp +++ b/include/EASTL/internal/atomic/atomic_pop_compiler_options.h @@ -3,27 +3,9 @@ ///////////////////////////////////////////////////////////////////////////// -#include "EASTLTest.h" -#include - - - - -int TestSparseMatrix() -{ - int nErrorCount = 0; - - return nErrorCount; -} - - - - - - - - - +/* NOTE: No Header Guard */ +EA_RESTORE_VC_WARNING(); +EA_RESTORE_CLANG_WARNING(); diff --git a/include/EASTL/internal/atomic/atomic_push_compiler_options.h b/include/EASTL/internal/atomic/atomic_push_compiler_options.h new file mode 100644 index 00000000..c5a54715 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_push_compiler_options.h @@ -0,0 +1,17 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +/* NOTE: No Header Guard */ + + +// 'class' : multiple assignment operators specified +EA_DISABLE_VC_WARNING(4522); + +// misaligned atomic operation may incur significant performance penalty +// The above warning is emitted in earlier versions of clang incorrectly. +// All eastl::atomic objects are size aligned. +// This is static and runtime asserted. +// Thus we disable this warning. +EA_DISABLE_CLANG_WARNING(-Watomic-alignment); diff --git a/include/EASTL/internal/atomic/atomic_size_aligned.h b/include/EASTL/internal/atomic/atomic_size_aligned.h new file mode 100644 index 00000000..c76f9834 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_size_aligned.h @@ -0,0 +1,199 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_SIZE_ALIGNED_H +#define EASTL_ATOMIC_INTERNAL_SIZE_ALIGNED_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#include "atomic_push_compiler_options.h" + + +namespace eastl +{ + + +namespace internal +{ + + +#define EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_IMPL(funcName) \ + template \ + bool funcName(T& expected, T desired, \ + OrderSuccess orderSuccess, \ + OrderFailure orderFailure) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \ + return false; \ + } \ + \ + template \ + bool funcName(T& expected, T desired, \ + OrderSuccess orderSuccess, \ + OrderFailure orderFailure) volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + return false; \ + } \ + \ + template \ + bool funcName(T& expected, T desired, \ + Order order) EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \ + return false; \ + } \ + \ + template \ + bool funcName(T& expected, T desired, \ + Order order) volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + return false; \ + } \ + \ + bool funcName(T& expected, T desired) volatile EA_NOEXCEPT \ + { \ + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \ + return false; \ + } + +#define EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_WEAK_IMPL() \ + EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_IMPL(compare_exchange_weak) + +#define EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_STRONG_IMPL() \ + EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_IMPL(compare_exchange_strong) + + + template + struct atomic_size_aligned + { + public: /* ctors */ + + atomic_size_aligned(T desired) EA_NOEXCEPT + : mAtomic{ desired } + { + EASTL_ATOMIC_ASSERT_ALIGNED(sizeof(T)); + } + + atomic_size_aligned() EA_NOEXCEPT + : mAtomic{} /* Zero-Initialized */ + { + EASTL_ATOMIC_ASSERT_ALIGNED(sizeof(T)); + } + + atomic_size_aligned(const atomic_size_aligned&) EA_NOEXCEPT = delete; + + public: /* store */ + + template + void store(T desired, Order order) EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); + } + + template + void store(T desired, Order order) volatile EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); + } + + void store(T desired) volatile EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); + } + + public: /* load */ + + template + T load(Order order) const EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); + } + + template + T load(Order order) const volatile EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); + } + + T load() const volatile EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); + } + + public: /* exchange */ + + template + T exchange(T desired, Order order) EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); + } + + template + T exchange(T desired, Order order) volatile EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); + } + + T exchange(T desired) volatile EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); + } + + public: /* compare_exchange_weak */ + + EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_WEAK_IMPL() + + public: /* compare_exchange_strong */ + + EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_STRONG_IMPL() + + public: /* assignment operator */ + + T operator =(T desired) volatile EA_NOEXCEPT + { + EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); + } + + atomic_size_aligned& 
operator =(const atomic_size_aligned&) EA_NOEXCEPT = delete; + atomic_size_aligned& operator =(const atomic_size_aligned&) volatile EA_NOEXCEPT = delete; + + protected: /* Accessors */ + + T* GetAtomicAddress() const EA_NOEXCEPT + { + return eastl::addressof(mAtomic); + } + + private: + + /** + * Some compilers such as MSVC will align 64-bit values on 32-bit machines on + * 4-byte boundaries which can ruin the atomicity guarantees. + * + * Ensure everything is size aligned. + * + * mutable is needed in cases such as when loads are only guaranteed to be atomic + * using a compare exchange, such as for 128-bit atomics, so we need to be able + * to have write access to the variable as one example. + */ + EA_ALIGN(sizeof(T)) mutable T mAtomic; + }; + + +} // namespace internal + + +} // namespace eastl + + +#include "atomic_pop_compiler_options.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_SIZE_ALIGNED_H */ diff --git a/include/EASTL/internal/atomic/atomic_standalone.h b/include/EASTL/internal/atomic/atomic_standalone.h new file mode 100644 index 00000000..ec0fb331 --- /dev/null +++ b/include/EASTL/internal/atomic/atomic_standalone.h @@ -0,0 +1,478 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_STANDALONE_H +#define EASTL_ATOMIC_INTERNAL_STANDALONE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +namespace eastl +{ + + +//////////////////////////////////////////////////////////////////////////////// +// +// bool atomic_compare_exchange_strong(eastl::atomic*, T* expected, T desired) +// +template +EASTL_FORCE_INLINE bool atomic_compare_exchange_strong(eastl::atomic* atomicObj, + typename eastl::atomic::value_type* expected, + typename eastl::atomic::value_type desired) EA_NOEXCEPT +{ + return atomicObj->compare_exchange_strong(*expected, desired); +} + +template +EASTL_FORCE_INLINE bool atomic_compare_exchange_strong_explicit(eastl::atomic* atomicObj, + typename eastl::atomic::value_type* expected, + typename eastl::atomic::value_type desired, + OrderSuccess orderSuccess, OrderFailure orderFailure) EA_NOEXCEPT +{ + return atomicObj->compare_exchange_strong(*expected, desired, orderSuccess, orderFailure); +} + + +//////////////////////////////////////////////////////////////////////////////// +// +// bool atomic_compare_exchange_weak(eastl::atomic*, T* expected, T desired) +// +template +EASTL_FORCE_INLINE bool atomic_compare_exchange_weak(eastl::atomic* atomicObj, + typename eastl::atomic::value_type* expected, + typename eastl::atomic::value_type desired) EA_NOEXCEPT +{ + return atomicObj->compare_exchange_weak(*expected, desired); +} + +template +EASTL_FORCE_INLINE bool atomic_compare_exchange_weak_explicit(eastl::atomic* atomicObj, + typename eastl::atomic::value_type* expected, + typename eastl::atomic::value_type desired, + OrderSuccess orderSuccess, OrderFailure orderFailure) EA_NOEXCEPT +{ + return atomicObj->compare_exchange_weak(*expected, desired, orderSuccess, orderFailure); +} + + +//////////////////////////////////////////////////////////////////////////////// +// +// T atomic_fetch_xor(eastl::atomic*, T arg) +// +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_fetch_xor(eastl::atomic* atomicObj, + typename eastl::atomic::value_type arg) EA_NOEXCEPT +{ + return atomicObj->fetch_xor(arg); +} + +template +EASTL_FORCE_INLINE typename 
eastl::atomic::value_type atomic_fetch_xor_explicit(eastl::atomic* atomicObj, + typename eastl::atomic::value_type arg, + Order order) EA_NOEXCEPT +{ + return atomicObj->fetch_xor(arg, order); +} + + +//////////////////////////////////////////////////////////////////////////////// +// +// T atomic_xor_fetch(eastl::atomic*, T arg) +// +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_xor_fetch(eastl::atomic* atomicObj, + typename eastl::atomic::value_type arg) EA_NOEXCEPT +{ + return atomicObj->xor_fetch(arg); +} + +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_xor_fetch_explicit(eastl::atomic* atomicObj, + typename eastl::atomic::value_type arg, + Order order) EA_NOEXCEPT +{ + return atomicObj->xor_fetch(arg, order); +} + + +//////////////////////////////////////////////////////////////////////////////// +// +// T atomic_fetch_or(eastl::atomic*, T arg) +// +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_fetch_or(eastl::atomic* atomicObj, + typename eastl::atomic::value_type arg) EA_NOEXCEPT +{ + return atomicObj->fetch_or(arg); +} + +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_fetch_or_explicit(eastl::atomic* atomicObj, + typename eastl::atomic::value_type arg, + Order order) EA_NOEXCEPT +{ + return atomicObj->fetch_or(arg, order); +} + + +//////////////////////////////////////////////////////////////////////////////// +// +// T atomic_or_fetch(eastl::atomic*, T arg) +// +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_or_fetch(eastl::atomic* atomicObj, + typename eastl::atomic::value_type arg) EA_NOEXCEPT +{ + return atomicObj->or_fetch(arg); +} + +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_or_fetch_explicit(eastl::atomic* atomicObj, + typename eastl::atomic::value_type arg, + Order order) EA_NOEXCEPT +{ + return atomicObj->or_fetch(arg, order); +} + + +//////////////////////////////////////////////////////////////////////////////// +// +// T atomic_fetch_and(eastl::atomic*, T arg) +// +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_fetch_and(eastl::atomic* atomicObj, + typename eastl::atomic::value_type arg) EA_NOEXCEPT +{ + return atomicObj->fetch_and(arg); +} + +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_fetch_and_explicit(eastl::atomic* atomicObj, + typename eastl::atomic::value_type arg, + Order order) EA_NOEXCEPT +{ + return atomicObj->fetch_and(arg, order); +} + + +//////////////////////////////////////////////////////////////////////////////// +// +// T atomic_and_fetch(eastl::atomic*, T arg) +// +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_and_fetch(eastl::atomic* atomicObj, + typename eastl::atomic::value_type arg) EA_NOEXCEPT +{ + return atomicObj->and_fetch(arg); +} + +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_and_fetch_explicit(eastl::atomic* atomicObj, + typename eastl::atomic::value_type arg, + Order order) EA_NOEXCEPT +{ + return atomicObj->and_fetch(arg, order); +} + + +///////////////////////////////////////////////////////////////////////////////// +// +// T atomic_fetch_sub(eastl::atomic*, T arg) +// +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_fetch_sub(eastl::atomic* atomicObj, + typename eastl::atomic::difference_type arg) EA_NOEXCEPT +{ + return atomicObj->fetch_sub(arg); +} + +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_fetch_sub_explicit(eastl::atomic* 
atomicObj, + typename eastl::atomic::difference_type arg, + Order order) EA_NOEXCEPT +{ + return atomicObj->fetch_sub(arg, order); +} + + +///////////////////////////////////////////////////////////////////////////////// +// +// T atomic_sub_fetch(eastl::atomic*, T arg) +// +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_sub_fetch(eastl::atomic* atomicObj, + typename eastl::atomic::difference_type arg) EA_NOEXCEPT +{ + return atomicObj->sub_fetch(arg); +} + +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_sub_fetch_explicit(eastl::atomic* atomicObj, + typename eastl::atomic::difference_type arg, + Order order) EA_NOEXCEPT +{ + return atomicObj->sub_fetch(arg, order); +} + + +///////////////////////////////////////////////////////////////////////////////// +// +// T atomic_fetch_add(eastl::atomic*, T arg) +// +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_fetch_add(eastl::atomic* atomicObj, + typename eastl::atomic::difference_type arg) EA_NOEXCEPT +{ + return atomicObj->fetch_add(arg); +} + +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_fetch_add_explicit(eastl::atomic* atomicObj, + typename eastl::atomic::difference_type arg, + Order order) EA_NOEXCEPT +{ + return atomicObj->fetch_add(arg, order); +} + + +///////////////////////////////////////////////////////////////////////////////// +// +// T atomic_add_fetch(eastl::atomic*, T arg) +// +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_add_fetch(eastl::atomic* atomicObj, + typename eastl::atomic::difference_type arg) EA_NOEXCEPT +{ + return atomicObj->add_fetch(arg); +} + +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_add_fetch_explicit(eastl::atomic* atomicObj, + typename eastl::atomic::difference_type arg, + Order order) EA_NOEXCEPT +{ + return atomicObj->add_fetch(arg, order); +} + + +///////////////////////////////////////////////////////////////////////////////// +// +// T atomic_exchange(eastl::atomic*, T desired) +// +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_exchange(eastl::atomic* atomicObj, + typename eastl::atomic::value_type desired) EA_NOEXCEPT +{ + return atomicObj->exchange(desired); +} + +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_exchange_explicit(eastl::atomic* atomicObj, + typename eastl::atomic::value_type desired, + Order order) EA_NOEXCEPT +{ + return atomicObj->exchange(desired, order); +} + + +///////////////////////////////////////////////////////////////////////////////// +// +// T atomic_load(const eastl::atomic*) +// +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_load(const eastl::atomic* atomicObj) EA_NOEXCEPT +{ + return atomicObj->load(); +} + +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_load_explicit(const eastl::atomic* atomicObj, Order order) EA_NOEXCEPT +{ + return atomicObj->load(order); +} + + +///////////////////////////////////////////////////////////////////////////////// +// +// T atomic_load_cond(const eastl::atomic*) +// +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_load_cond(const eastl::atomic* atomicObj, Predicate pred) EA_NOEXCEPT +{ + typename eastl::atomic::value_type ret; + + for (;;) + { + ret = atomicObj->load(); + + if (pred(ret)) + { + break; + } + + EASTL_ATOMIC_CPU_PAUSE(); + } + + return ret; +} + +template +EASTL_FORCE_INLINE typename eastl::atomic::value_type atomic_load_cond_explicit(const eastl::atomic* 
atomicObj, Predicate pred, Order order) EA_NOEXCEPT +{ + typename eastl::atomic<T>::value_type ret; + + for (;;) + { + ret = atomicObj->load(order); + + if (pred(ret)) + { + break; + } + + EASTL_ATOMIC_CPU_PAUSE(); + } + + return ret; +} + + +///////////////////////////////////////////////////////////////////////////////// +// +// void atomic_store(eastl::atomic<T>*, T) +// +template <typename T> +EASTL_FORCE_INLINE void atomic_store(eastl::atomic<T>* atomicObj, typename eastl::atomic<T>::value_type desired) EA_NOEXCEPT +{ + atomicObj->store(desired); +} + +template <typename T, typename Order> +EASTL_FORCE_INLINE void atomic_store_explicit(eastl::atomic<T>* atomicObj, typename eastl::atomic<T>::value_type desired, Order order) EA_NOEXCEPT +{ + atomicObj->store(desired, order); +} + + +///////////////////////////////////////////////////////////////////////////////// +// +// void eastl::atomic_thread_fence(Order) +// +template <typename Order> +EASTL_FORCE_INLINE void atomic_thread_fence(Order) EA_NOEXCEPT +{ + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order); +} + +EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT +{ + EASTL_ATOMIC_THREAD_FENCE_RELAXED(); +} + +EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_acquire_s) EA_NOEXCEPT +{ + EASTL_ATOMIC_THREAD_FENCE_ACQUIRE(); +} + +EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_release_s) EA_NOEXCEPT +{ + EASTL_ATOMIC_THREAD_FENCE_RELEASE(); +} + +EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT +{ + EASTL_ATOMIC_THREAD_FENCE_ACQ_REL(); +} + +EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT +{ + EASTL_ATOMIC_THREAD_FENCE_SEQ_CST(); +} + + +///////////////////////////////////////////////////////////////////////////////// +// +// void eastl::atomic_signal_fence(Order) +// +template <typename Order> +EASTL_FORCE_INLINE void atomic_signal_fence(Order) EA_NOEXCEPT +{ + EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order); +} + +EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT +{ + EASTL_ATOMIC_SIGNAL_FENCE_RELAXED(); +} + +EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_acquire_s) EA_NOEXCEPT +{ + EASTL_ATOMIC_SIGNAL_FENCE_ACQUIRE(); +} + +EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_release_s) EA_NOEXCEPT +{ + EASTL_ATOMIC_SIGNAL_FENCE_RELEASE(); +} + +EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT +{ + EASTL_ATOMIC_SIGNAL_FENCE_ACQ_REL(); +} + +EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT +{ + EASTL_ATOMIC_SIGNAL_FENCE_SEQ_CST(); +} + + +///////////////////////////////////////////////////////////////////////////////// +// +// void eastl::compiler_barrier() +// +EASTL_FORCE_INLINE void compiler_barrier() EA_NOEXCEPT +{ + EASTL_ATOMIC_COMPILER_BARRIER(); +} + + +///////////////////////////////////////////////////////////////////////////////// +// +// void eastl::compiler_barrier_data_dependency(const T&) +// +template <typename T> +EASTL_FORCE_INLINE void compiler_barrier_data_dependency(const T& val) EA_NOEXCEPT +{ + EASTL_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, T); +} + + +///////////////////////////////////////////////////////////////////////////////// +// +// void eastl::cpu_pause() +// +EASTL_FORCE_INLINE void cpu_pause() EA_NOEXCEPT +{ + EASTL_ATOMIC_CPU_PAUSE(); +} + +
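For reference, a minimal usage sketch of the eastl::atomic_load_cond helper introduced in this file: it spins, re-loading the atomic and calling EASTL_ATOMIC_CPU_PAUSE() between polls, until the predicate returns true. The gReady flag, the lambda predicate, and WaitForReady are illustrative names only and are not part of this change:

	#include <EASTL/atomic.h>

	// A producer thread eventually stores a non-zero value into this flag (illustrative).
	eastl::atomic<int> gReady(0);

	int WaitForReady()
	{
		// Spins until the predicate returns true, then returns the observed value.
		return eastl::atomic_load_cond(&gReady, [](int value) { return value != 0; });
	}

The predicate-based form keeps the CPU-pause backoff in one place instead of requiring every caller to hand-roll a busy-wait loop.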
+///////////////////////////////////////////////////////////////////////////////// +// +// bool eastl::atomic_is_lock_free(eastl::atomic<T>*) +// +template <typename T> +EASTL_FORCE_INLINE bool atomic_is_lock_free(const eastl::atomic<T>* atomicObj) EA_NOEXCEPT +{ + return atomicObj->is_lock_free(); +} + + +} // namespace eastl + + +#endif /* EASTL_ATOMIC_INTERNAL_STANDALONE_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler.h b/include/EASTL/internal/atomic/compiler/compiler.h new file mode 100644 index 00000000..2fd220ca --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler.h @@ -0,0 +1,116 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// Include the compiler specific implementations +// +#if defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG) + + #include "gcc/compiler_gcc.h" + +#elif defined(EA_COMPILER_MSVC) + + #include "msvc/compiler_msvc.h" + +#endif + + +///////////////////////////////////////////////////////////////////////////////// + +namespace eastl +{ + +namespace internal +{ + + +/** + * NOTE: + * This can be used by specific compiler implementations to implement a data dependency compiler barrier. + * Some compiler barriers do not take in input dependencies as is possible with the gcc asm syntax. + * Thus we need a way to create a false dependency on the input variable so the compiler does not dead-store + * remove it. + * A volatile function pointer ensures the compiler must always load the function pointer and call through it + * since the compiler cannot reason about any side effects. Thus the compiler must always assume the + * input variable may be accessed and thus cannot be dead-stored. This technique works even in the presence + * of Link-Time Optimization. A compiler barrier with a data dependency is useful in these situations. + * + * void foo() + * { + * eastl::vector<int> v; + * while (Benchmark.ContinueRunning()) + * { + * v.push_back(0); + * eastl::compiler_barrier(); OR eastl::compiler_barrier_data_dependency(v); + * } + * } + * + * We are trying to benchmark the push_back function of a vector. The vector v has only local scope. + * The compiler is well within its rights to remove all accesses to v even with the compiler barrier + * because there are no observable uses of the vector v. + * The compiler barrier data dependency ensures there is an input dependency on the variable so that + * it isn't removed. This is also useful when writing test code that the compiler may remove.
+ */ + +typedef void (*CompilerBarrierDataDependencyFuncPtr)(void*); + +extern EASTL_API volatile CompilerBarrierDataDependencyFuncPtr gCompilerBarrierDataDependencyFunc; + + +#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_FUNC(ptr) \ + eastl::internal::gCompilerBarrierDataDependencyFunc(ptr) + + +} // namespace internal + +} // namespace eastl + + +///////////////////////////////////////////////////////////////////////////////// + + +#include "compiler_fetch_add.h" +#include "compiler_fetch_sub.h" + +#include "compiler_fetch_and.h" +#include "compiler_fetch_xor.h" +#include "compiler_fetch_or.h" + +#include "compiler_add_fetch.h" +#include "compiler_sub_fetch.h" + +#include "compiler_and_fetch.h" +#include "compiler_xor_fetch.h" +#include "compiler_or_fetch.h" + +#include "compiler_exchange.h" + +#include "compiler_cmpxchg_weak.h" +#include "compiler_cmpxchg_strong.h" + +#include "compiler_load.h" +#include "compiler_store.h" + +#include "compiler_barrier.h" + +#include "compiler_cpu_pause.h" + +#include "compiler_memory_barrier.h" + +#include "compiler_signal_fence.h" + +#include "compiler_thread_fence.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_add_fetch.h b/include/EASTL/internal/atomic/compiler/compiler_add_fetch.h new file mode 100644 index 00000000..763921c4 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_add_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_ADD_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_ADD_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16) + #define 
EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + 
#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_ADD_FETCH_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_and_fetch.h b/include/EASTL/internal/atomic/compiler/compiler_and_fetch.h new file mode 100644 index 00000000..7b1e0a42 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_and_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_AND_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_AND_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 1 +#else 
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_AND_FETCH_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_barrier.h b/include/EASTL/internal/atomic/compiler/compiler_barrier.h new file mode 100644 index 00000000..550070e3 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_barrier.h @@ -0,0 +1,36 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() +// +#if defined(EASTL_COMPILER_ATOMIC_COMPILER_BARRIER) + #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type) +// +#if defined(EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY) + #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_BARRIER_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h b/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h new file mode 100644 index 00000000..2ee29711 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h @@ -0,0 +1,430 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_STRONG_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_STRONG_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 1 
+#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 0 +#endif + +#if 
defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 1 +#else 
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, 
desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32(type, ret, ptr, expected, desired) \ + 
EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE +#define 
EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_STRONG_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h b/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h new file mode 100644 index 00000000..9bc1a621 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h @@ -0,0 +1,430 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_WEAK_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_WEAK_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16) 
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 0 +#endif + +#if 
defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 
0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE +#define 
EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_64(type, 
ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128_AVAILABLE \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_WEAK_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h b/include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h new file mode 100644 index 00000000..073b3fbb --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h @@ -0,0 +1,32 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_CPU_PAUSE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_CPU_PAUSE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CPU_PAUSE() +// +#if defined(EASTL_COMPILER_ATOMIC_CPU_PAUSE) + + #define EASTL_COMPILER_ATOMIC_CPU_PAUSE_AVAILABLE 1 + +#else + + #define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \ + ((void)0) + + #define EASTL_COMPILER_ATOMIC_CPU_PAUSE_AVAILABLE 1 + +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_CPU_PAUSE_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_exchange.h b/include/EASTL/internal/atomic/compiler/compiler_exchange.h new file mode 100644 index 00000000..d82b199d --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_exchange.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_EXCHANGE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_EXCHANGE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if 
defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_EXCHANGE_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_fetch_add.h b/include/EASTL/internal/atomic/compiler/compiler_fetch_add.h new file mode 100644 index 00000000..e6c4238f --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_fetch_add.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_ADD_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_ADD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64) + #define 
EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_ADD_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_fetch_and.h b/include/EASTL/internal/atomic/compiler/compiler_fetch_and.h new file mode 100644 index 00000000..b0976fc7 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_fetch_and.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_AND_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_AND_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64) + #define 
EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_AND_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_fetch_or.h b/include/EASTL/internal/atomic/compiler/compiler_fetch_or.h new file mode 100644 index 00000000..2e6cfdac --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_fetch_or.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_OR_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_OR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 
1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_OR_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h b/include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h new file mode 100644 index 00000000..d7ed86cc --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_SUB_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_SUB_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64) + #define 
EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_SUB_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h b/include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h new file mode 100644 index 00000000..10cf7d90 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_XOR_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_XOR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64) + #define 
EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_XOR_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_load.h b/include/EASTL/internal/atomic/compiler/compiler_load.h new file mode 100644 index 00000000..0c76b6bc --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_load.h @@ -0,0 +1,131 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_LOAD_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_LOAD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_LOAD_*_N(type, type ret, type * ptr) +// +#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 0 +#endif + + +/** + * NOTE: + * + * These are used for data-dependent reads thru a pointer. It is safe + * to assume that pointer-sized reads are atomic on any given platform. + * This implementation assumes the hardware doesn't reorder dependent + * loads unlike the DEC Alpha. 
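+ *
+ * As a purely illustrative, hedged sketch (Node, gNode and mValue are hypothetical names, not EASTL API),
+ * the consumer-side pattern these READ_DEPENDS macros below exist for is roughly:
+ *
+ *     Node* node;
+ *     EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_64(Node*, node, &gNode);   // plain volatile pointer-sized read
+ *     int v = node->mValue;                                              // dependent load through the just-read pointer
+ *
+ * On such hardware the dereference is ordered after the pointer load by the data dependency alone,
+ * which is why a volatile read of the pointer is sufficient here.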
+ */ +#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_32(type, ret, ptr) \ + ret = (*EASTL_ATOMIC_VOLATILE_CAST(ptr)) \ + +#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_64(type, ret, ptr) \ + ret = (*EASTL_ATOMIC_VOLATILE_CAST(ptr)) \ + +#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_32_AVAILABLE 1 +#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_64_AVAILABLE 1 + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_LOAD_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h b/include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h new file mode 100644 index 00000000..ac3923c6 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h @@ -0,0 +1,47 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MEMORY_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MEMORY_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CPU_MB() +// +#if defined(EASTL_COMPILER_ATOMIC_CPU_MB) + #define EASTL_COMPILER_ATOMIC_CPU_MB_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CPU_MB_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CPU_WMB() +// +#if defined(EASTL_COMPILER_ATOMIC_CPU_WMB) + #define EASTL_COMPILER_ATOMIC_CPU_WMB_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CPU_WMB_AVAILABLE 0 +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CPU_RMB() +// +#if defined(EASTL_COMPILER_ATOMIC_CPU_RMB) + #define EASTL_COMPILER_ATOMIC_CPU_RMB_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_CPU_RMB_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MEMORY_BARRIER_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_or_fetch.h b/include/EASTL/internal/atomic/compiler/compiler_or_fetch.h new file mode 100644 index 00000000..a26a72c7 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_or_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_OR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_OR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 
1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_OR_FETCH_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_signal_fence.h b/include/EASTL/internal/atomic/compiler/compiler_signal_fence.h new file mode 100644 index 00000000..25b0b741 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_signal_fence.h @@ -0,0 +1,49 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_SIGNAL_FENCE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_SIGNAL_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_*() +// +#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED) + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE) + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE) + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL) + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST) + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_SIGNAL_FENCE_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_store.h b/include/EASTL/internal/atomic/compiler/compiler_store.h new file mode 100644 index 00000000..1a553e2a --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_store.h @@ -0,0 +1,113 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_STORE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_STORE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_STORE_*_N(type, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_STORE_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h b/include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h new file mode 100644 index 00000000..4b7eea92 --- /dev/null +++ 
b/include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_SUB_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_SUB_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if 
defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128) + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_SUB_FETCH_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_thread_fence.h b/include/EASTL/internal/atomic/compiler/compiler_thread_fence.h new file mode 100644 index 00000000..01d8f0f9 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_thread_fence.h @@ -0,0 +1,49 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_THREAD_FENCE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_THREAD_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_THREAD_FENCE_*() +// +#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED) + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE) + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE) + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL) + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST) + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 0 +#endif + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_THREAD_FENCE_H */ diff --git a/include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h b/include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h new file mode 100644 index 00000000..05680bd1 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h @@ -0,0 +1,173 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_XOR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_XOR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 0 +#endif + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32) + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 1 +#else + #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 0 +#endif + + +#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64) + #define 
EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128)
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+	#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_XOR_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h
new file mode 100644
index 00000000..4b74f9b3
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h
@@ -0,0 +1,139 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+/**
+ * NOTE:
+ * The gcc __atomic builtins may defer to function calls in libatomic.so for architectures that do not
+ * support atomic instructions of a given size. These functions are implemented with pthread_mutex_t,
+ * and using them requires explicitly linking against the compiler runtime libatomic.so.
+ * On architectures that do not support atomics, such as armv6, the builtins may defer to kernel helpers,
+ * or on classic uniprocessor systems simply disable interrupts.
+ *
+ * We do not want to have to link against libatomic.so or fall into the trap of our atomics degrading
+ * into locks. We would rather have user-code explicitly use locking primitives if their code cannot
+ * be satisfied with atomic instructions on the given platform.
+ */
+static_assert(__atomic_always_lock_free(1, 0), "eastl::atomic where sizeof(T) == 1 must be lock-free!");
+static_assert(__atomic_always_lock_free(2, 0), "eastl::atomic where sizeof(T) == 2 must be lock-free!");
+static_assert(__atomic_always_lock_free(4, 0), "eastl::atomic where sizeof(T) == 4 must be lock-free!");
+#if EA_PLATFORM_PTR_SIZE == 8
+	static_assert(__atomic_always_lock_free(8, 0), "eastl::atomic where sizeof(T) == 8 must be lock-free!");
+#endif
+
+/**
+ * NOTE:
+ *
+ * The following can fail on gcc/clang on 64-bit systems.
+ * First, whether clang calls out to libatomic depends on the -march setting.
+ * Second, gcc always calls out to libatomic for 128-bit atomics. It is unclear whether it uses locks
+ * or looks at the cpuid and uses cmpxchg16b if it is available.
+ * The gcc mailing lists argue that since a load must be implemented with cmpxchg16b, the __atomic builtin
+ * cannot be used on read-only memory, which is why they always call out to libatomic.
+ * There is, unfortunately, no way to tell gcc not to do that.
+ * We don't care about the read-only restriction because our eastl::atomic object is mutable, and msvc
+ * doesn't enforce this restriction either; thus, to be fully platform agnostic, we cannot enforce it.
+ *
+ * Therefore, the following static_assert is commented out for the time being.
+ *
+ * static_assert(__atomic_always_lock_free(16, 0), "eastl::atomic where sizeof(T) == 16 must be lock-free!");
+ */
+
+/**
+ * NOTE:
+ * Why do we cast to the unsigned fixed width types for every operation even though the gcc/clang builtins are generic?
+ * Because gcc/clang, correctly from their standpoint but incorrectly for our purposes, call out to libatomic and do
+ * locking on user types that may be potentially misaligned.
+ * struct UserType { uint8_t a,b; }; This struct is 2 bytes in size but has only 1 byte alignment.
+ * gcc/clang cannot know that we always guarantee every type T is size aligned within eastl::atomic.
+ * Therefore they always emit calls into libatomic and do locking for structs like these, which we do not want.
+ * Therefore you'll notice we always cast each atomic ptr type to the equivalent unsigned width type when doing the atomic operations.
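+ *
+ * For illustration only: the following is a hypothetical sketch, not code from this header, and the
+ * names Pair32 / LoadRelaxed are made up (assuming the <stdint.h> fixed width types). It shows roughly
+ * what the cast through the equivalent unsigned fixed width type buys us for a user type whose natural
+ * alignment is smaller than its size:
+ *
+ *     struct Pair32 { uint32_t lo, hi; };  // 8 bytes in size, but only 4 byte natural alignment
+ *
+ *     Pair32 LoadRelaxed(Pair32* ptr)      // ptr is 8 byte aligned because eastl::atomic size-aligns T
+ *     {
+ *         uint64_t retIntegral;
+ *         __atomic_load(reinterpret_cast<volatile uint64_t*>(ptr), &retIntegral, __ATOMIC_RELAXED);
+ *         return *reinterpret_cast<Pair32*>(&retIntegral);  // pun the loaded bits back to the user type
+ *     }
+ *
+ * Handed the raw Pair32*, the builtin cannot assume 8 byte alignment and may call into libatomic;
+ * through the uint64_t cast it can emit a plain inline 64-bit atomic load. This is roughly what the
+ * EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST / EASTL_ATOMIC_TYPE_PUN_CAST usage below expands to.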
+ */ + +///////////////////////////////////////////////////////////////////////////////// + + +#define EASTL_COMPILER_ATOMIC_HAS_8BIT +#define EASTL_COMPILER_ATOMIC_HAS_16BIT +#define EASTL_COMPILER_ATOMIC_HAS_32BIT +#define EASTL_COMPILER_ATOMIC_HAS_64BIT + +#if EA_PLATFORM_PTR_SIZE == 8 + #define EASTL_COMPILER_ATOMIC_HAS_128BIT +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +#define EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, gccMemoryOrder) \ + { \ + integralType retIntegral; \ + integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \ + \ + retIntegral = fetchIntrinsic(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), valIntegral, gccMemoryOrder); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + } + +#define EASTL_GCC_ATOMIC_CMPXCHG_INTRIN_N(integralType, type, ret, ptr, expected, desired, weak, successOrder, failOrder) \ + ret = __atomic_compare_exchange(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \ + EASTL_ATOMIC_INTEGRAL_CAST(integralType, (expected)), \ + EASTL_ATOMIC_INTEGRAL_CAST(integralType, &(desired)), \ + weak, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_EXCHANGE_INTRIN_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + { \ + integralType retIntegral; \ + integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \ + \ + __atomic_exchange(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \ + &valIntegral, &retIntegral, gccMemoryOrder); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + } + + +///////////////////////////////////////////////////////////////////////////////// + + +#include "compiler_gcc_fetch_add.h" +#include "compiler_gcc_fetch_sub.h" + +#include "compiler_gcc_fetch_and.h" +#include "compiler_gcc_fetch_xor.h" +#include "compiler_gcc_fetch_or.h" + +#include "compiler_gcc_add_fetch.h" +#include "compiler_gcc_sub_fetch.h" + +#include "compiler_gcc_and_fetch.h" +#include "compiler_gcc_xor_fetch.h" +#include "compiler_gcc_or_fetch.h" + +#include "compiler_gcc_exchange.h" + +#include "compiler_gcc_cmpxchg_weak.h" +#include "compiler_gcc_cmpxchg_strong.h" + +#include "compiler_gcc_load.h" +#include "compiler_gcc_store.h" + +#include "compiler_gcc_barrier.h" + +#include "compiler_gcc_cpu_pause.h" + +#include "compiler_gcc_signal_fence.h" + +#include "compiler_gcc_thread_fence.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h new file mode 100644 index 00000000..1d19196b --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_ADD_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_ADD_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_ADD_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_add_fetch, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_ADD_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_ADD_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_ADD_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_ADD_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_ADD_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_ADD_FETCH_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h new file mode 100644 index 00000000..a35307f0 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_AND_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_AND_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_AND_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_and_fetch, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_AND_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_AND_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_AND_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_AND_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_AND_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_AND_FETCH_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h new file mode 100644 index 00000000..9920fe9f --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h @@ -0,0 +1,31 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() +// +#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() \ + __asm__ __volatile__ ("" ::: "memory") + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type) +// +#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, type) \ + __asm__ __volatile__ ("" : /* Output Operands */ : "r"(&(val)) : "memory") + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_BARRIER_H */ + diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h new file mode 100644 index 00000000..3e47cf2e --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h @@ -0,0 +1,182 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_STRONG_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_STRONG_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(integralType, type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_INTRIN_N(integralType, type, ret, ptr, expected, desired, false, successOrder, failOrder) + + +#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint8_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint16_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint32_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint64_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(__uint128_t, type, ret, ptr, expected, desired, successOrder, failOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define 
EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, 
expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) 
\ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_STRONG_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h new file mode 100644 index 00000000..f55fe3a3 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h @@ -0,0 +1,182 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_WEAK_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_WEAK_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(integralType, type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_INTRIN_N(integralType, type, ret, ptr, expected, desired, true, successOrder, failOrder) + + +#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint8_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint16_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint32_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint64_t, type, ret, ptr, expected, desired, successOrder, failOrder) + +#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, successOrder, failOrder) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(__uint128_t, type, ret, ptr, expected, desired, successOrder, failOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \ + 
EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + 
EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \ + 
EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+	EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+	EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+	EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+	EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \
+	EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+	EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+	EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+	EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_WEAK_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h
new file mode 100644
index 00000000..9d4ac35e
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h
@@ -0,0 +1,31 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CPU_PAUSE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CPU_PAUSE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+	#pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_PAUSE()
+//
+#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)
+
+	#define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \
+		__asm__ __volatile__ ("pause")
+
+#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64)
+
+	#define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \
+		__asm__ __volatile__ ("yield")
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CPU_PAUSE_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h
new file mode 100644
index 00000000..a3325547
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_EXCHANGE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_EXCHANGE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_EXCHANGE_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_EXCHANGE_INTRIN_N(integralType, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_EXCHANGE_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_EXCHANGE_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_EXCHANGE_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_EXCHANGE_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_EXCHANGE_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \ + 
EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_EXCHANGE_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h new file mode 100644 index 00000000..98abbb83 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_ADD_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_ADD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_FETCH_ADD_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_add, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_ADD_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_ADD_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_ADD_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_ADD_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_ADD_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_ADD_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h new file mode 100644 index 00000000..0dfb81db --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_AND_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_AND_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_FETCH_AND_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_and, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_AND_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_AND_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_AND_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_AND_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_AND_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_AND_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h new file mode 100644 index 00000000..ba259b74 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_OR_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_OR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_FETCH_OR_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_or, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_OR_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_OR_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_OR_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_OR_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_OR_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128(type, ret, ptr, val) \ + 
EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_OR_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h new file mode 100644 index 00000000..c8be225e --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_SUB_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_SUB_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_FETCH_SUB_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_sub, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_SUB_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_SUB_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_SUB_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_SUB_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_SUB_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_SUB_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h new file mode 100644 index 00000000..4ec6d676 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_XOR_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_XOR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_FETCH_XOR_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_xor, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_XOR_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_XOR_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_XOR_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_XOR_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_XOR_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_XOR_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h new file mode 100644 index 00000000..a4a3ebf1 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h @@ -0,0 +1,90 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_LOAD_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_LOAD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_LOAD_N(integralType, type, ret, ptr, gccMemoryOrder) \ + { \ + integralType retIntegral; \ + __atomic_load(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), &retIntegral, gccMemoryOrder); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + } + +#define EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_LOAD_N(uint8_t, type, ret, ptr, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_LOAD_N(uint16_t, type, ret, ptr, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_LOAD_N(uint32_t, type, ret, ptr, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_LOAD_N(uint64_t, type, ret, ptr, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_LOAD_N(__uint128_t, type, ret, ptr, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_LOAD_*_N(type, type ret, type * ptr) +// +#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \ + EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_LOAD_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h new file mode 100644 
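Note that the load wrappers never hand the user type to __atomic_load directly: the value is loaded through a same-sized unsigned integer and then punned back with EASTL_ATOMIC_TYPE_PUN_CAST, which is what lets non-integral 8/16/32/64/128-bit types share one code path. A rough standalone equivalent of the 32-bit acquire load for a hypothetical 4-byte struct (illustrative sketch; memcpy stands in for the EASTL pun/cast helpers, which are defined elsewhere in the atomic internals):

    #include <stdint.h>
    #include <string.h>

    struct Packed32 { uint16_t a; uint16_t b; }; // hypothetical 4-byte user type

    static Packed32 sketch_load_acquire(Packed32* ptr)
    {
        // Load the raw bits with acquire ordering through the integral alias...
        uint32_t bits;
        __atomic_load(reinterpret_cast<uint32_t*>(ptr), &bits, __ATOMIC_ACQUIRE);

        // ...then pun them back into the user's type.
        Packed32 result;
        memcpy(&result, &bits, sizeof(result));
        return result;
    }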
index 00000000..9e4db3e1 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_OR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_OR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_OR_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_or_fetch, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_OR_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_OR_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_OR_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_OR_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_OR_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32(type, ret, ptr, val) \ + 
EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_OR_FETCH_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h new file mode 100644 index 00000000..16dff14f --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h @@ -0,0 +1,38 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SIGNAL_FENCE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SIGNAL_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_SIGNAL_FENCE(gccMemoryOrder) \ + __atomic_signal_fence(gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_*() +// +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED() \ + EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE() \ + EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE() \ + EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL() \ + EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST() \ + EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SIGNAL_FENCE_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h new file mode 100644 index 00000000..04a28ac4 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h @@ -0,0 +1,89 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_STORE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_STORE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_STORE_N(integralType, ptr, val, gccMemoryOrder) \ + { \ + integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \ + __atomic_store(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), &valIntegral, gccMemoryOrder); \ + } + + +#define EASTL_GCC_ATOMIC_STORE_8(ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_STORE_N(uint8_t, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_STORE_16(ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_STORE_N(uint16_t, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_STORE_32(ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_STORE_N(uint32_t, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_STORE_64(ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_STORE_N(uint64_t, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_STORE_128(ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_STORE_N(__uint128_t, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_STORE_*_N(type, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_8(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_8(ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_16(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_16(ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_32(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_32(ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_64(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_64(ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_128(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_128(ptr, val, __ATOMIC_RELAXED) + + +#define 
EASTL_COMPILER_ATOMIC_STORE_RELEASE_8(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_8(ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_16(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_16(ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_32(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_32(ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_64(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_64(ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_128(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_128(ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_8(ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_16(ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_32(ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_64(ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \ + EASTL_GCC_ATOMIC_STORE_128(ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_STORE_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h new file mode 100644 index 00000000..62f8cd91 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SUB_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SUB_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_SUB_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_sub_fetch, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_SUB_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_SUB_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_SUB_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_SUB_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_SUB_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32(type, ret, ptr, val) \ + 
EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SUB_FETCH_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h 
b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h new file mode 100644 index 00000000..0dd005e4 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h @@ -0,0 +1,38 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_THREAD_FENCE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_THREAD_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_THREAD_FENCE(gccMemoryOrder) \ + __atomic_thread_fence(gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_THREAD_FENCE_*() +// +#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED() \ + EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE() \ + EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE() \ + EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL() \ + EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST() \ + EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_THREAD_FENCE_H */ diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h new file mode 100644 index 00000000..4827d79f --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h @@ -0,0 +1,118 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
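The thread-fence macros map one-to-one onto __atomic_thread_fence. As a usage sketch (not taken from this patch), a standalone release fence pairs with an acquire fence to order otherwise relaxed accesses, which is the pattern these wrappers ultimately enable at the eastl::atomic level:

    #include <stdint.h>

    static uint32_t g_payload;
    static uint32_t g_ready;

    static void sketch_publish(uint32_t value)
    {
        __atomic_store_n(&g_payload, value, __ATOMIC_RELAXED);
        __atomic_thread_fence(__ATOMIC_RELEASE);   // payload store cannot sink below the flag store
        __atomic_store_n(&g_ready, 1u, __ATOMIC_RELAXED);
    }

    static bool sketch_consume(uint32_t* out)
    {
        if (__atomic_load_n(&g_ready, __ATOMIC_RELAXED) != 0)
        {
            __atomic_thread_fence(__ATOMIC_ACQUIRE); // payload load cannot hoist above the flag load
            *out = __atomic_load_n(&g_payload, __ATOMIC_RELAXED);
            return true;
        }
        return false;
    }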
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_XOR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_XOR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_GCC_ATOMIC_XOR_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_xor_fetch, type, ret, ptr, val, gccMemoryOrder) + + +#define EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_XOR_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_XOR_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_XOR_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_XOR_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder) + +#define EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \ + EASTL_GCC_ATOMIC_XOR_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE) + +#define 
EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_XOR_FETCH_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h new file mode 100644 index 00000000..3e9d533b --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h @@ -0,0 +1,223 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +EA_DISABLE_ALL_VC_WARNINGS(); +#include +#include +EA_RESTORE_ALL_VC_WARNINGS(); + + +///////////////////////////////////////////////////////////////////////////////// + + +#define EASTL_COMPILER_ATOMIC_HAS_8BIT +#define EASTL_COMPILER_ATOMIC_HAS_16BIT +#define EASTL_COMPILER_ATOMIC_HAS_32BIT +#define EASTL_COMPILER_ATOMIC_HAS_64BIT + +#if EA_PLATFORM_PTR_SIZE == 8 + #define EASTL_COMPILER_ATOMIC_HAS_128BIT +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +/** + * NOTE: + * Unfortunately MSVC Intrinsics depend on the architecture + * that we are compiling for. + * These are some indirection macros to make our lives easier and + * ensure the least possible amount of copy-paste to reduce programmer errors. 
+ */ +#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64) + + + #define EASTL_MSVC_ATOMIC_FETCH_OP(ret, ptr, val, MemoryOrder, Intrinsic) \ + ret = Intrinsic(ptr, val) + + #define EASTL_MSVC_ATOMIC_EXCHANGE_OP(ret, ptr, val, MemoryOrder, Intrinsic) \ + ret = Intrinsic(ptr, val) + + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(ret, ptr, comparand, exchange, MemoryOrder, Intrinsic) \ + ret = Intrinsic(ptr, exchange, comparand) + + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(ret, ptr, comparandResult, exchangeHigh, exchangeLow, MemoryOrder) \ + ret = _InterlockedCompareExchange128(ptr, exchangeHigh, exchangeLow, comparandResult) + + +#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64) + + + #define EASTL_MSVC_INTRINSIC_RELAXED(Intrinsic) \ + EA_PREPROCESSOR_JOIN(Intrinsic, _nf) + + #define EASTL_MSVC_INTRINSIC_ACQUIRE(Intrinsic) \ + EA_PREPROCESSOR_JOIN(Intrinsic, _acq) + + #define EASTL_MSVC_INTRINSIC_RELEASE(Intrinsic) \ + EA_PREPROCESSOR_JOIN(Intrinsic, _rel) + + #define EASTL_MSVC_INTRINSIC_ACQ_REL(Intrinsic) \ + Intrinsic + + #define EASTL_MSVC_INTRINSIC_SEQ_CST(Intrinsic) \ + Intrinsic + + + #define EASTL_MSVC_ATOMIC_FETCH_OP(ret, ptr, val, MemoryOrder, Intrinsic) \ + ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, val) + + #define EASTL_MSVC_ATOMIC_EXCHANGE_OP(ret, ptr, val, MemoryOrder, Intrinsic) \ + ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, val) + + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(ret, ptr, comparand, exchange, MemoryOrder, Intrinsic) \ + ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, exchange, comparand) + + #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(ret, ptr, comparandResult, exchangeHigh, exchangeLow, MemoryOrder) \ + ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(_InterlockedCompareExchange128)(ptr, exchangeHigh, exchangeLow, comparandResult) + + +#endif + + +///////////////////////////////////////////////////////////////////////////////// + + +#define EASTL_MSVC_NOP_POST_INTRIN_COMPUTE(ret, lhs, rhs) + +#define EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE(ret, val) \ + ret = (val) + + +#define EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE) \ + { \ + integralType retIntegral; \ + type valCompute; \ + \ + PRE_INTRIN_COMPUTE(valCompute, (val)); \ + const integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, valCompute); \ + \ + EASTL_MSVC_ATOMIC_FETCH_OP(retIntegral, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \ + valIntegral, MemoryOrder, fetchIntrinsic); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + POST_INTRIN_COMPUTE(ret, ret, (val)); \ + } + +#define EASTL_MSVC_ATOMIC_EXCHANGE_INTRIN_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder) \ + { \ + integralType retIntegral; \ + EASTL_MSVC_ATOMIC_EXCHANGE_OP(retIntegral, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \ + EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)), MemoryOrder, \ + exchangeIntrinsic); \ + \ + ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \ + } + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder) \ + { \ + integralType comparandIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, *(expected)); \ + integralType oldIntegral; \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(oldIntegral, 
EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \ + comparandIntegral, EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (desired)), \ + MemoryOrder, cmpxchgStrongIntrinsic); \ + \ + if (oldIntegral == comparandIntegral) \ + { \ + ret = true; \ + } \ + else \ + { \ + *(expected) = EASTL_ATOMIC_TYPE_PUN_CAST(type, oldIntegral); \ + ret = false; \ + } \ + } + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_128(type, ret, ptr, expected, desired, MemoryOrder) \ + { \ + union TypePun \ + { \ + type templateType; \ + \ + struct exchange128 \ + { \ + EASTL_SYSTEM_BIG_ENDIAN_STATEMENT(__int64 hi, lo); \ + \ + EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT(__int64 lo, hi); \ + }; \ + \ + struct exchange128 exchangePun; \ + }; \ + \ + union TypePun typePun = { (desired) }; \ + \ + unsigned char cmpxchgRetChar; \ + cmpxchgRetChar = EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(cmpxchgRetChar, EASTL_ATOMIC_VOLATILE_TYPE_CAST(__int64, (ptr)), \ + EASTL_ATOMIC_TYPE_CAST(__int64, (expected)), \ + typePun.exchangePun.hi, typePun.exchangePun.lo, \ + MemoryOrder); \ + \ + ret = static_cast(cmpxchgRetChar); \ + } + + +///////////////////////////////////////////////////////////////////////////////// + + +#define EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE) \ + EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, EASTL_MSVC_NOP_POST_INTRIN_COMPUTE) + +#define EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE) \ + EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE) + +#define EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_EXCHANGE_INTRIN_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_128(type, ret, ptr, expected, desired, MemoryOrder) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_128(type, ret, ptr, expected, desired, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// + + +#include "compiler_msvc_fetch_add.h" +#include "compiler_msvc_fetch_sub.h" + +#include "compiler_msvc_fetch_and.h" +#include "compiler_msvc_fetch_xor.h" +#include "compiler_msvc_fetch_or.h" + +#include "compiler_msvc_add_fetch.h" +#include "compiler_msvc_sub_fetch.h" + +#include "compiler_msvc_and_fetch.h" +#include "compiler_msvc_xor_fetch.h" +#include "compiler_msvc_or_fetch.h" + +#include "compiler_msvc_exchange.h" + +#include "compiler_msvc_cmpxchg_weak.h" +#include "compiler_msvc_cmpxchg_strong.h" + +#include "compiler_msvc_barrier.h" + +#include "compiler_msvc_cpu_pause.h" + +#include "compiler_msvc_signal_fence.h" + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h new file mode 100644 index 00000000..f7f0c39b --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h @@ -0,0 +1,104 @@ 
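As the NOTE above explains, the indirection macros exist because MSVC's interlocked intrinsics are spelled differently per architecture: on ARM/ARM64 there are _nf/_acq/_rel suffixed variants selected by token-pasting the memory order, while on x86/x64 there is a single lock-prefixed, sequentially consistent form and the MemoryOrder argument is simply ignored. A minimal sketch of what the acquire fetch-add path resolves to on each target (illustrative; the real code goes through EASTL_MSVC_ATOMIC_FETCH_OP and the type-punning FETCH_INTRIN macros above):

    #include <intrin.h>

    static long sketch_fetch_add_acquire(long volatile* ptr, long val)
    {
    #if defined(_M_ARM) || defined(_M_ARM64)
        // EASTL_MSVC_INTRINSIC_ACQUIRE(_InterlockedExchangeAdd) pastes the _acq suffix.
        return _InterlockedExchangeAdd_acq(ptr, val);
    #else
        // x86/x64: the plain intrinsic already implies a full barrier.
        return _InterlockedExchangeAdd(ptr, val);
    #endif
    }

Also relevant to the *_fetch files that follow: the Interlocked intrinsics return the previous value, so the op-fetch wrappers recompute the new value afterwards through their POST_INTRIN_COMPUTE hook (for example, EASTL_MSVC_ADD_FETCH_POST_INTRIN_COMPUTE re-derives ret = old + val).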
+///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_ADD_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_ADD_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_ADD_FETCH_POST_INTRIN_COMPUTE(ret, val, addend) \ + ret = (val) + (addend) + +#define EASTL_MSVC_ATOMIC_ADD_FETCH_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_ADD_FETCH_POST_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_N(long long, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16(type, 
ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_ADD_FETCH_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h new file mode 100644 index 00000000..66f89ef2 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h @@ -0,0 +1,104 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_AND_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_AND_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_AND_FETCH_POST_INTRIN_COMPUTE(ret, val, andend) \ + ret = (val) & (andend) + +#define EASTL_MSVC_ATOMIC_AND_FETCH_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_AND_FETCH_POST_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_AND_FETCH_N(char, _InterlockedAnd8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_AND_FETCH_N(short, _InterlockedAnd16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_AND_FETCH_N(long, _InterlockedAnd, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_AND_FETCH_N(long long, _InterlockedAnd64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, RELAXED) + + +#define 
EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_AND_FETCH_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h new file mode 100644 index 00000000..02e2d03a --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h @@ -0,0 +1,31 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_BARRIER_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_BARRIER_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() +// +#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() \ + _ReadWriteBarrier() + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type) +// +#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, type) \ + EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_FUNC(const_cast<type*>(eastl::addressof((val)))); \ + EASTL_ATOMIC_COMPILER_BARRIER() + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_BARRIER_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h new file mode 100644 index 00000000..427d3498 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h @@ -0,0 +1,178 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_STRONG_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_STRONG_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, MemoryOrder) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(char, _InterlockedCompareExchange8, type, ret, ptr, expected, desired, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, MemoryOrder) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(short, _InterlockedCompareExchange16, type, ret, ptr, expected, desired, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, MemoryOrder) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(long, _InterlockedCompareExchange, type, ret, ptr, expected, desired, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, MemoryOrder) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(long long, _InterlockedCompareExchange64, type, ret, ptr, expected, desired, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, MemoryOrder) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_128(type, ret, ptr, expected, desired, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, RELAXED) + +#define 
EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, RELAXED) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, RELEASE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, RELEASE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, RELEASE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, RELEASE) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQ_REL) + +#define 
EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, SEQ_CST) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, SEQ_CST) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, SEQ_CST) + +#define 
EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_STRONG_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h new file mode 100644 index 00000000..8f4147ac --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h @@ -0,0 +1,162 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_WEAK_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_WEAK_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired) +// +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) + +#define 
EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define 
EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) + + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) + +#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \ + EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) + + +#endif /* 
EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_WEAK_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h new file mode 100644 index 00000000..720701ab --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h @@ -0,0 +1,27 @@ +///////////////////////////////////////////////////////////////////////////////// +// copyright (c) electronic arts inc. all rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CPU_PAUSE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CPU_PAUSE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_CPU_PAUSE() +// +// NOTE: +// Rather obscure macro in Windows.h that expands to pause or rep; nop on +// compatible x86 cpus or the arm yield on compatible arm processors. +// This is nicer than switching on platform specific intrinsics. +// +#define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \ + YieldProcessor() + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CPU_PAUSE_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h new file mode 100644 index 00000000..93055e54 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h @@ -0,0 +1,125 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_EXCHANGE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_EXCHANGE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(char, _InterlockedExchange8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(short, _InterlockedExchange16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(long, _InterlockedExchange, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(long long, _InterlockedExchange64, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, MemoryOrder) \ + { \ + bool cmpxchgRet; \ + /* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \ + /* Either we do not observe *(ptr) but then the cmpxchg will fail and the observed */ \ + /* atomic load will be returned. Or the non-atomic load got lucky and the cmpxchg succeeds */ \ + /* because the observed value equals the value in *(ptr) thus we optimistically do a non-atomic load. 
*/ \ + ret = *(ptr); \ + do \ + { \ + EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), val); \ + } while (!cmpxchgRet); \ + } + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, SEQ_CST) 
+ +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_EXCHANGE_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h new file mode 100644 index 00000000..4cb05874 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h @@ -0,0 +1,101 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_ADD_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_ADD_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_ATOMIC_FETCH_ADD_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_N(long long, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16(type, ret, ptr, val) \ + 
EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_ADD_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h new file mode 100644 index 00000000..c04f86df --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h @@ -0,0 +1,101 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_AND_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_AND_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_ATOMIC_FETCH_AND_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_AND_N(char, _InterlockedAnd8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_AND_N(short, _InterlockedAnd16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_AND_N(long, _InterlockedAnd, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_AND_N(long long, _InterlockedAnd64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \ + 
EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_AND_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h new file mode 100644 index 00000000..a592bdff --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h @@ -0,0 +1,101 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_OR_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_OR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_ATOMIC_FETCH_OR_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OR_N(char, _InterlockedOr8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OR_N(short, _InterlockedOr16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OR_N(long, _InterlockedOr, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OR_N(long long, _InterlockedOr64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \ + 
EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_OR_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h new file mode 100644 index 00000000..25f41f97 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h @@ -0,0 +1,104 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_SUB_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_SUB_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_FETCH_SUB_PRE_INTRIN_COMPUTE(ret, val) \ + ret = EASTL_ATOMIC_NEGATE_OPERAND((val)) + +#define EASTL_MSVC_ATOMIC_FETCH_SUB_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_FETCH_SUB_PRE_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_N(long long, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32(type, ret, ptr, val) \ + 
EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_SUB_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h new file mode 100644 index 00000000..7402e20d --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h @@ -0,0 +1,101 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_XOR_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_XOR_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_ATOMIC_FETCH_XOR_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_N(char, _InterlockedXor8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_N(short, _InterlockedXor16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_N(long, _InterlockedXor, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_N(long long, _InterlockedXor64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32(type, ret, ptr, val) \ 
+ EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_XOR_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h new file mode 100644 index 00000000..fe4218a7 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h @@ -0,0 +1,104 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_OR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_OR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_OR_FETCH_POST_INTRIN_COMPUTE(ret, val, orend) \ + ret = (val) | (orend) + +#define EASTL_MSVC_ATOMIC_OR_FETCH_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_OR_FETCH_POST_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OR_FETCH_N(char, _InterlockedOr8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OR_FETCH_N(short, _InterlockedOr16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OR_FETCH_N(long, _InterlockedOr, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OR_FETCH_N(long long, _InterlockedOr64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, ACQ_REL) + 
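// A hedged illustration of the pattern behind these or-fetch mappings. OrFetchSketch is
// illustrative only and not part of EASTL; _InterlockedOr is the MSVC intrinsic declared in
// <intrin.h>. The _Interlocked* intrinsics return the value stored before the operation, so
// EASTL_MSVC_OR_FETCH_POST_INTRIN_COMPUTE above re-applies the OR to the intrinsic's result
// to produce the post-operation value that an or-fetch must return.
long OrFetchSketch(long volatile* ptr, long val)
{
	long previous = _InterlockedOr(ptr, val); // fetch-or: yields the value held before the OR
	return previous | val;                    // or-fetch: recompute the value stored after the OR
}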
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_OR_FETCH_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h new file mode 100644 index 00000000..f35f5772 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h @@ -0,0 +1,34 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SIGNAL_FENCE_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SIGNAL_FENCE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_*() +// +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED() \ + EASTL_ATOMIC_COMPILER_BARRIER() + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE() \ + EASTL_ATOMIC_COMPILER_BARRIER() + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE() \ + EASTL_ATOMIC_COMPILER_BARRIER() + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL() \ + EASTL_ATOMIC_COMPILER_BARRIER() + +#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST() \ + EASTL_ATOMIC_COMPILER_BARRIER() + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SIGNAL_FENCE_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h new file mode 100644 index 00000000..97be65d1 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h @@ -0,0 +1,107 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SUB_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SUB_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_SUB_FETCH_PRE_INTRIN_COMPUTE(ret, val) \ + ret = EASTL_ATOMIC_NEGATE_OPERAND((val)) + +#define EASTL_MSVC_SUB_FETCH_POST_INTRIN_COMPUTE(ret, val, subend) \ + ret = (val) - (subend) + +#define EASTL_MSVC_ATOMIC_SUB_FETCH_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_SUB_FETCH_PRE_INTRIN_COMPUTE, EASTL_MSVC_SUB_FETCH_POST_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_N(long long, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + 
EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SUB_FETCH_H */ diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h new file mode 100644 index 00000000..61409b81 --- /dev/null +++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h @@ -0,0 +1,104 @@ +///////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_XOR_FETCH_H +#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_XOR_FETCH_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +#define EASTL_MSVC_XOR_FETCH_POST_INTRIN_COMPUTE(ret, val, xorend) \ + ret = (val) ^ (xorend) + +#define EASTL_MSVC_ATOMIC_XOR_FETCH_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder, \ + EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_XOR_FETCH_POST_INTRIN_COMPUTE) + + +#define EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_N(char, _InterlockedXor8, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_N(short, _InterlockedXor16, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_N(long, _InterlockedXor, type, ret, ptr, val, MemoryOrder) + +#define EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, MemoryOrder) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_N(long long, _InterlockedXor64, type, ret, ptr, val, MemoryOrder) + + +///////////////////////////////////////////////////////////////////////////////// +// +// void EASTL_COMPILER_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val) +// +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, RELAXED) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, RELAXED) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8(type, ret, ptr, val) \ 
+ EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, ACQUIRE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, ACQUIRE) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, RELEASE) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, RELEASE) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, ACQ_REL) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, ACQ_REL) + + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, SEQ_CST) + +#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \ + EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, SEQ_CST) + + +#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_XOR_FETCH_H */ diff --git a/include/EASTL/internal/config.h b/include/EASTL/internal/config.h index a824609a..cc881276 100644 --- a/include/EASTL/internal/config.h +++ b/include/EASTL/internal/config.h @@ -89,8 +89,8 @@ /////////////////////////////////////////////////////////////////////////////// #ifndef EASTL_VERSION - #define EASTL_VERSION "3.16.07" - #define EASTL_VERSION_N 31607 + #define EASTL_VERSION "3.17.00" + #define EASTL_VERSION_N 31700 #endif @@ -143,14 +143,7 @@ // http://en.wikipedia.org/wiki/C%2B%2B14#Relaxed_constexpr_restrictions // #if !defined(EA_CPP14_CONSTEXPR) - - #if defined(EA_COMPILER_MSVC_2015) - #define EA_CPP14_CONSTEXPR // not supported - #define EA_NO_CPP14_CONSTEXPR - #elif defined(__GNUC__) && (EA_COMPILER_VERSION < 9000) // Before GCC 9.0 - #define EA_CPP14_CONSTEXPR // not supported - #define EA_NO_CPP14_CONSTEXPR - #elif defined(EA_COMPILER_CPP14_ENABLED) + #if defined(EA_COMPILER_CPP14_ENABLED) #define EA_CPP14_CONSTEXPR constexpr #else #define EA_CPP14_CONSTEXPR // not supported @@ -832,7 +825,7 @@ namespace eastl // Defined as 0 or 1. // #ifndef EASTL_INT128_SUPPORTED - #if defined(EA_COMPILER_INTMAX_SIZE) && (EA_COMPILER_INTMAX_SIZE >= 16) // If the compiler supports int128_t (recent versions of GCC do)... 
+ #if defined(__SIZEOF_INT128__) || (defined(EA_COMPILER_INTMAX_SIZE) && (EA_COMPILER_INTMAX_SIZE >= 16)) #define EASTL_INT128_SUPPORTED 1 #else #define EASTL_INT128_SUPPORTED 0 @@ -871,7 +864,7 @@ namespace eastl #if EASTL_INT128_SUPPORTED #define EASTL_INT128_DEFINED 1 - #if defined(__GNUC__) + #if defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG) typedef __int128_t eastl_int128_t; typedef __uint128_t eastl_uint128_t; #else @@ -1795,6 +1788,7 @@ typedef EASTL_SSIZE_T eastl_ssize_t; // Signed version of eastl_size_t. Concept #ifdef _MSC_VER #pragma warning(disable: 4455) // disable warning C4455: literal suffix identifiers that do not start with an underscore are reserved #endif + #else #define EASTL_USER_LITERALS_ENABLED 0 #endif @@ -1855,7 +1849,6 @@ typedef EASTL_SSIZE_T eastl_ssize_t; // Signed version of eastl_size_t. Concept #endif - /// EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR /// This feature define allows users to toggle the problematic eastl::pair implicit /// single element constructor. @@ -1863,4 +1856,22 @@ typedef EASTL_SSIZE_T eastl_ssize_t; // Signed version of eastl_size_t. Concept #define EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR 0 #endif +/// EASTL_SYSTEM_BIG_ENDIAN_STATEMENT +/// EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT +/// These macros allow you to write endian specific macros as statements. +/// This allows endian specific code to be macro expanded from within other macros +/// +#if defined(EA_SYSTEM_BIG_ENDIAN) + #define EASTL_SYSTEM_BIG_ENDIAN_STATEMENT(...) __VA_ARGS__ +#else + #define EASTL_SYSTEM_BIG_ENDIAN_STATEMENT(...) +#endif + +#if defined(EA_SYSTEM_LITTLE_ENDIAN) + #define EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT(...) __VA_ARGS__ +#else + #define EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT(...) +#endif + + #endif // Header include guard diff --git a/include/EASTL/internal/fixed_pool.h b/include/EASTL/internal/fixed_pool.h index 0b610ed6..5a380046 100644 --- a/include/EASTL/internal/fixed_pool.h +++ b/include/EASTL/internal/fixed_pool.h @@ -30,18 +30,13 @@ #include #include -#ifdef _MSC_VER - #pragma warning(push, 0) - #include - #pragma warning(pop) -#else - #include -#endif -#if defined(_MSC_VER) - #pragma warning(push) - #pragma warning(disable: 4275) // non dll-interface class used as base for DLL-interface classkey 'identifier' -#endif +EA_DISABLE_ALL_VC_WARNINGS(); +#include +EA_RESTORE_ALL_VC_WARNINGS(); + +// 4275 - non dll-interface class used as base for DLL-interface classkey 'identifier' +EA_DISABLE_VC_WARNING(4275); namespace eastl @@ -1630,10 +1625,7 @@ namespace eastl } // namespace eastl -#if defined(_MSC_VER) - #pragma warning(pop) -#endif +EA_RESTORE_VC_WARNING(); #endif // Header include guard - diff --git a/include/EASTL/internal/function_detail.h b/include/EASTL/internal/function_detail.h index 17f281de..7d52d55a 100644 --- a/include/EASTL/internal/function_detail.h +++ b/include/EASTL/internal/function_detail.h @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -313,7 +312,86 @@ namespace eastl } #endif // EASTL_RTTI_ENABLED - static R Invoker(const FunctorStorageType& functor, Args... args) + /** + * NOTE: + * + * The order of arguments here is vital to the call optimization. Let's dig into why and look at some asm. + * We have two invoker signatures to consider: + * R Invoker(const FunctorStorageType& functor, Args... args) + * R Invoker(Args... args, const FunctorStorageType& functor) + * + * Assume we are using the Windows x64 Calling Convention where the first 4 arguments are passed into + * RCX, RDX, R8, R9. 
This optimization works for any Calling Convention; we are just using Windows x64 for + * this example. + * + * Given the following member function: void TestMemberFunc(int a, int b) + * RCX == this + * RDX == a + * R8 == b + * + * All three arguments to the function, including the hidden this pointer, which in C++ is always the first argument, + * are passed into the first three registers. + * The function call chain for eastl::function<>() is as follows: + * operator ()(this, Args... args) -> Invoker(Args... args, this->mStorage) -> StoredFunction(Args... args) + * + * Let's look at what is happening at the asm level with the different Invoker function signatures and why. + * + * You will notice that operator ()() and Invoker() have the arguments reversed. operator ()() just directly calls + * to Invoker(); it is a tail call, so we force inline the call operator to ensure we directly call to the Invoker(). + * Most compilers inline it by default anyway, but there have been instances where they don't even though the asm ends + * up being cheaper. + * call -> call -> call versus call -> call + * + * eastl::function = FunctionPointer + * + * Assume we have the above eastl::function object that holds a pointer to a function as the internal callable. + * + * Invoker(this->mStorage, Args... args) is called with the following arguments in registers: + * RCX = this | RDX = a | R8 = b + * + * Inside Invoker() we use RCX to dereference into the eastl::function object and get the function pointer to call. + * This function to call has signature Func(int, int) and thus requires its arguments in registers RCX and RDX. + * The compiler must shift all the arguments towards the left. The full asm looks something like the following. + * + * Calling Invoker: Inside Invoker: + * + * mov rcx, this mov rax, [rcx] + * mov rdx, a mov rcx, rdx + * mov r8, b mov rdx, r8 + * call [rcx + offset to Invoker] jmp [rax] + * + * Notice how the compiler shifts all the arguments before calling the callable, and also that we only use the this pointer + * to access the internal storage inside the eastl::function object. + * + * Invoker(Args... args, this->mStorage) is called with the following arguments in registers: + * RCX = a | RDX = b | R8 = this + * + * You can see we no longer have to shift the arguments down when going to call the internal stored callable. + * + * Calling Invoker: Inside Invoker: + * + * mov rcx, a mov rax, [r8] + * mov rdx, b jmp [rax] + * mov r8, this + * call [r8 + offset to Invoker] + * + * The generated asm does a straight tail jmp to the loaded function pointer. The arguments are already in the correct + * registers. + * + * For Functors or Lambdas with no captures, this gives us another free register to use to pass arguments: since the this + * pointer is at the end, it can be passed on the stack if we run out of registers. Because the callable has no captures, inside + * the Invoker() we never need to touch this, so we can just call the operator ()() or let the compiler inline it. + * + * For a callable with captures there is no perf hit, since in the common case the callable is inlined and the pointer to the callable + * buffer is passed in a register, which the compiler can use to access the captures. + * + * For an eastl::function that holds a pointer to a member function, the this pointer is implicitly + * the first argument in the argument list, const T&, and the member function pointer will be called on that object.
+ * This prevents any argument shifting since the this for the member function pointer is already in RCX. + * + * This is why having this at the end of the argument list is important for generating efficient Invoker() thunks. + */ + static R Invoker(Args... args, const FunctorStorageType& functor) { return eastl::invoke(*Base::GetFunctorPtr(functor), eastl::forward(args)...); } @@ -405,7 +483,7 @@ namespace eastl { Destroy(); mMgrFuncPtr = nullptr; - mInvokeFuncPtr = nullptr; + mInvokeFuncPtr = &DefaultInvoker; return *this; } @@ -459,17 +537,9 @@ namespace eastl return HaveManager(); } - R operator ()(Args... args) const + EASTL_FORCE_INLINE R operator ()(Args... args) const { - #if EASTL_EXCEPTIONS_ENABLED - if (!HaveManager()) - { - throw eastl::bad_function_call(); - } - #else - EASTL_ASSERT_MSG(HaveManager(), "function_detail call on an empty function_detail"); - #endif - return (*mInvokeFuncPtr)(mStorage, eastl::forward(args)...); + return (*mInvokeFuncPtr)(eastl::forward(args)..., this->mStorage); } #if EASTL_RTTI_ENABLED @@ -547,7 +617,7 @@ namespace eastl mMgrFuncPtr = other.mMgrFuncPtr; mInvokeFuncPtr = other.mInvokeFuncPtr; other.mMgrFuncPtr = nullptr; - other.mInvokeFuncPtr = nullptr; + other.mInvokeFuncPtr = &DefaultInvoker; } template @@ -559,7 +629,7 @@ namespace eastl if (internal::is_null(functor)) { mMgrFuncPtr = nullptr; - mInvokeFuncPtr = nullptr; + mInvokeFuncPtr = &DefaultInvoker; } else { @@ -571,10 +641,29 @@ namespace eastl private: typedef void* (*ManagerFuncPtr)(void*, void*, typename Base::ManagerOperations); - typedef R (*InvokeFuncPtr)(const FunctorStorageType&, Args...); + typedef R (*InvokeFuncPtr)(Args..., const FunctorStorageType&); + + EA_DISABLE_GCC_WARNING(-Wreturn-type); + EA_DISABLE_CLANG_WARNING(-Wreturn-type); + EA_DISABLE_VC_WARNING(4716); // 'function' must return a value + // We cannot assume that R is default constructible. + // This function is called only when the function object CANNOT be called because it is empty, + // it will always throw or assert so we never use the return value anyways and neither should the caller. + static R DefaultInvoker(Args... args, const FunctorStorageType& functor) + { + #if EASTL_EXCEPTIONS_ENABLED + throw eastl::bad_function_call(); + #else + EASTL_ASSERT_MSG(false, "function_detail call on an empty function_detail"); + #endif + }; + EA_RESTORE_VC_WARNING(); + EA_RESTORE_CLANG_WARNING(); + EA_RESTORE_GCC_WARNING(); + ManagerFuncPtr mMgrFuncPtr = nullptr; - InvokeFuncPtr mInvokeFuncPtr = nullptr; + InvokeFuncPtr mInvokeFuncPtr = &DefaultInvoker; }; } // namespace internal diff --git a/include/EASTL/internal/generic_iterator.h b/include/EASTL/internal/generic_iterator.h index 8aa630fb..b32998a8 100644 --- a/include/EASTL/internal/generic_iterator.h +++ b/include/EASTL/internal/generic_iterator.h @@ -23,12 +23,9 @@ #include #include - -#ifdef _MSC_VER - #pragma warning(push) // VC++ generates a bogus warning that you cannot code away. - #pragma warning(disable: 4619) // There is no warning number 'number'. - #pragma warning(disable: 4217) // Member template functions cannot be used for copy-assignment or copy-construction. -#endif +// There is no warning number 'number'. +// Member template functions cannot be used for copy-assignment or copy-construction. 
+EA_DISABLE_VC_WARNING(4619 4217); namespace eastl @@ -205,25 +202,7 @@ namespace eastl } // namespace eastl -#ifdef _MSC_VER - #pragma warning(pop) -#endif +EA_RESTORE_VC_WARNING(); #endif // Header include guard - - - - - - - - - - - - - - - - diff --git a/include/EASTL/internal/hashtable.h b/include/EASTL/internal/hashtable.h index d45c432f..2b634051 100644 --- a/include/EASTL/internal/hashtable.h +++ b/include/EASTL/internal/hashtable.h @@ -48,12 +48,10 @@ EA_DISABLE_ALL_VC_WARNINGS() #include EA_RESTORE_ALL_VC_WARNINGS() -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable: 4512) // 'class' : assignment operator could not be generated. - #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc - #pragma warning(disable: 4571) // catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. -#endif +// 4512 - 'class' : assignment operator could not be generated. +// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc +// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. +EA_DISABLE_VC_WARNING(4512 4530 4571); namespace eastl @@ -3218,18 +3216,7 @@ namespace eastl } // namespace eastl -#ifdef _MSC_VER - #pragma warning(pop) -#endif +EA_RESTORE_VC_WARNING(); #endif // Header include guard - - - - - - - - - diff --git a/include/EASTL/internal/intrusive_hashtable.h b/include/EASTL/internal/intrusive_hashtable.h index 269a6720..dccca5b1 100644 --- a/include/EASTL/internal/intrusive_hashtable.h +++ b/include/EASTL/internal/intrusive_hashtable.h @@ -29,18 +29,11 @@ #include #include -#ifdef _MSC_VER - #pragma warning(push, 0) - #include - #include - #include - #pragma warning(pop) -#else - #include - #include - #include -#endif - +EA_DISABLE_ALL_VC_WARNINGS(); +#include +#include +#include +EA_RESTORE_ALL_VC_WARNINGS(); namespace eastl @@ -994,12 +987,3 @@ namespace eastl #endif // Header include guard - - - - - - - - - diff --git a/include/EASTL/internal/red_black_tree.h b/include/EASTL/internal/red_black_tree.h index cc198feb..802d5fd2 100644 --- a/include/EASTL/internal/red_black_tree.h +++ b/include/EASTL/internal/red_black_tree.h @@ -27,12 +27,10 @@ EA_DISABLE_ALL_VC_WARNINGS() EA_RESTORE_ALL_VC_WARNINGS() -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable: 4512) // 'class' : assignment operator could not be generated - #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc - #pragma warning(disable: 4571) // catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. -#endif +// 4512 - 'class' : assignment operator could not be generated +// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc +// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. 
+EA_DISABLE_VC_WARNING(4512 4530 4571); namespace eastl @@ -2045,7 +2043,7 @@ namespace eastl inline typename rbtree::node_type* rbtree::DoAllocateNode() { - auto* pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0); + auto* pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0); EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); return pNode; @@ -2331,22 +2329,7 @@ namespace eastl } // namespace eastl -#ifdef _MSC_VER - #pragma warning(pop) -#endif +EA_RESTORE_VC_WARNING(); #endif // Header include guard - - - - - - - - - - - - - diff --git a/include/EASTL/internal/smart_ptr.h b/include/EASTL/internal/smart_ptr.h index 6eab3f8a..f1d52e1b 100644 --- a/include/EASTL/internal/smart_ptr.h +++ b/include/EASTL/internal/smart_ptr.h @@ -61,8 +61,8 @@ namespace eastl #define EASTL_TYPE_TRAIT_is_array_cv_convertible_CONFORMANCE 1 - template ::element_type>::type, - typename eastl::remove_cv::element_type>::type>::value> + template ::element_type>, + eastl::remove_cv_t::element_type>>> struct is_array_cv_convertible_impl : public eastl::is_convertible {}; // Return true if P1 is convertible to P2. @@ -70,7 +70,7 @@ namespace eastl struct is_array_cv_convertible_impl : public eastl::false_type {}; // P1's underlying type is not the same as P2's, so it can't be converted, even if P2 refers to a subclass of P1. Parent == Child, but Parent[] != Child[] - template ::value && !eastl::is_pointer::value> + template && !eastl::is_pointer_v> struct is_array_cv_convertible : public is_array_cv_convertible_impl {}; diff --git a/include/EASTL/internal/thread_support.h b/include/EASTL/internal/thread_support.h index 747d9946..80386d20 100644 --- a/include/EASTL/internal/thread_support.h +++ b/include/EASTL/internal/thread_support.h @@ -37,15 +37,12 @@ #include #endif +// copy constructor could not be generated because a base class copy constructor is inaccessible or deleted. +// assignment operator could not be generated because a base class assignment operator is inaccessible or deleted. +// non dll-interface class used as base for DLL-interface classkey 'identifier'. +EA_DISABLE_VC_WARNING(4625 4626 4275); -#if defined(_MSC_VER) - #pragma warning(push) - #pragma warning(disable: 4625) // copy constructor could not be generated because a base class copy constructor is inaccessible or deleted. - #pragma warning(disable: 4626) // assignment operator could not be generated because a base class assignment operator is inaccessible or deleted. - #pragma warning(disable: 4275) // non dll-interface class used as base for DLL-interface classkey 'identifier'. -#endif - #if defined(EA_PLATFORM_MICROSOFT) #if defined(EA_PROCESSOR_POWERPC) extern "C" long __stdcall _InterlockedIncrement(long volatile* Addend); @@ -241,17 +238,7 @@ namespace eastl } // namespace eastl -#if defined(_MSC_VER) - #pragma warning(pop) -#endif +EA_RESTORE_VC_WARNING(); #endif // Header include guard - - - - - - - - diff --git a/include/EASTL/internal/type_fundamental.h b/include/EASTL/internal/type_fundamental.h index a90cb445..950d15e3 100644 --- a/include/EASTL/internal/type_fundamental.h +++ b/include/EASTL/internal/type_fundamental.h @@ -139,6 +139,10 @@ namespace eastl #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type which is already handled above... 
template <> struct is_integral_helper : public true_type{}; #endif + #if EASTL_INT128_SUPPORTED && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) + template <> struct is_integral_helper<__int128_t> : public true_type{}; + template <> struct is_integral_helper<__uint128_t> : public true_type{}; + #endif template struct is_integral : public eastl::is_integral_helper::type>{}; diff --git a/include/EASTL/iterator.h b/include/EASTL/iterator.h index e12cd012..6fffd5da 100644 --- a/include/EASTL/iterator.h +++ b/include/EASTL/iterator.h @@ -11,36 +11,27 @@ #include #include -#ifdef _MSC_VER - #pragma warning(push, 0) -#endif +EA_DISABLE_ALL_VC_WARNINGS(); #include -#ifdef _MSC_VER - #pragma warning(pop) -#endif +EA_RESTORE_ALL_VC_WARNINGS(); // If the user has specified that we use std iterator // categories instead of EASTL iterator categories, // then #include . #if EASTL_STD_ITERATOR_CATEGORY_ENABLED - #ifdef _MSC_VER - #pragma warning(push, 0) - #endif - #include - #ifdef _MSC_VER - #pragma warning(pop) - #endif -#endif + EA_DISABLE_ALL_VC_WARNINGS(); + #include -#ifdef _MSC_VER - #pragma warning(push) // VC++ generates a bogus warning that you cannot code away. - #pragma warning(disable: 4619) // There is no warning number 'number'. - #pragma warning(disable: 4217) // Member template functions cannot be used for copy-assignment or copy-construction. + EA_RESTORE_ALL_VC_WARNINGS(); #endif + +EA_DISABLE_VC_WARNING(4619); // There is no warning number 'number'. +EA_DISABLE_VC_WARNING(4217); // Member template functions cannot be used for copy-assignment or copy-construction. + #if defined(EA_PRAGMA_ONCE_SUPPORTED) #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. #endif @@ -1192,14 +1183,7 @@ namespace eastl -#if defined(_MSC_VER) - #pragma warning(pop) -#endif - +EA_RESTORE_VC_WARNING(); +EA_RESTORE_VC_WARNING(); #endif // Header include guard - - - - - diff --git a/include/EASTL/list.h b/include/EASTL/list.h index 023bccee..680dcad7 100644 --- a/include/EASTL/list.h +++ b/include/EASTL/list.h @@ -48,13 +48,12 @@ EA_DISABLE_ALL_VC_WARNINGS() #include EA_RESTORE_ALL_VC_WARNINGS() -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc - #pragma warning(disable: 4345) // Behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized - #pragma warning(disable: 4571) // catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. - #pragma warning(disable: 4623) // default constructor was implicitly defined as deleted -#endif + +// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc +// 4345 - Behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized +// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. 
+// 4623 - default constructor was implicitly defined as deleted +EA_DISABLE_VC_WARNING(4530 4345 4571 4623); #if defined(EA_PRAGMA_ONCE_SUPPORTED) @@ -2163,34 +2162,7 @@ namespace eastl EA_RESTORE_SN_WARNING() -#ifdef _MSC_VER - #pragma warning(pop) -#endif +EA_RESTORE_VC_WARNING(); #endif // Header include guard - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/include/EASTL/memory.h b/include/EASTL/memory.h index 6d6b8a3b..d1bdc493 100644 --- a/include/EASTL/memory.h +++ b/include/EASTL/memory.h @@ -72,7 +72,6 @@ #include #include #include -#include #include #include #include @@ -85,12 +84,12 @@ EA_DISABLE_ALL_VC_WARNINGS() #include EA_RESTORE_ALL_VC_WARNINGS() -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc - #pragma warning(disable: 4146) // unary minus operator applied to unsigned type, result still unsigned - #pragma warning(disable: 4571) // catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. -#endif + +// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc +// 4146 - unary minus operator applied to unsigned type, result still unsigned +// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. +EA_DISABLE_VC_WARNING(4530 4146 4571); + #if defined(EA_PRAGMA_ONCE_SUPPORTED) #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. @@ -1587,19 +1586,18 @@ namespace eastl }; template ::value> - struct pointer_element_type; + struct pointer_element_type + { + using type = Pointer; + }; template struct pointer_element_type { typedef typename Pointer::element_type type; }; - #if EASTL_VARIADIC_TEMPLATES_ENABLED // See 20.6.3.1 p3 for why we need to support this. Pointer may be a template with various arguments as opposed to a non-templated class. - template