diff --git a/doc/EASTL.natvis b/doc/EASTL.natvis
index 30986d5e..2fb311b1 100644
--- a/doc/EASTL.natvis
+++ b/doc/EASTL.natvis
@@ -558,6 +558,76 @@
<DisplayString>{mFlag.mAtomic}</DisplayString>
+
+ <Type Name="eastl::variant&lt;*&gt;">
+   <DisplayString Condition="index() == -1">[valueless_by_exception]</DisplayString>
+   <DisplayString Condition="index() == 0">{{ index=0, value={($T1*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 1">{{ index=1, value={($T2*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 2">{{ index=2, value={($T3*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 3">{{ index=3, value={($T4*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 4">{{ index=4, value={($T5*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 5">{{ index=5, value={($T6*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 6">{{ index=6, value={($T7*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 7">{{ index=7, value={($T8*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 8">{{ index=8, value={($T9*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 9">{{ index=9, value={($T10*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 10">{{ index=10, value={($T11*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 11">{{ index=11, value={($T12*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 12">{{ index=12, value={($T13*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 13">{{ index=13, value={($T14*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 14">{{ index=14, value={($T15*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 15">{{ index=15, value={($T16*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 16">{{ index=16, value={($T17*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 17">{{ index=17, value={($T18*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 18">{{ index=18, value={($T19*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 19">{{ index=19, value={($T20*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 20">{{ index=20, value={($T21*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 21">{{ index=21, value={($T22*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 22">{{ index=22, value={($T23*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 23">{{ index=23, value={($T24*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 24">{{ index=24, value={($T25*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 25">{{ index=25, value={($T26*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 26">{{ index=26, value={($T27*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 27">{{ index=27, value={($T28*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 28">{{ index=28, value={($T29*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 29">{{ index=29, value={($T30*)mStorage.mBuffer.mCharData} }}</DisplayString>
+   <DisplayString Condition="index() == 30">{{ index=30, value={($T31*)mStorage.mBuffer.mCharData} }}</DisplayString>
+
+   <Expand>
+     <Item Name="index">index()</Item>
+     <Item Name="value" Condition="index() == 0">($T1*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 1">($T2*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 2">($T3*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 3">($T4*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 4">($T5*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 5">($T6*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 6">($T7*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 7">($T8*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 8">($T9*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 9">($T10*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 10">($T11*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 11">($T12*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 12">($T13*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 13">($T14*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 14">($T15*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 15">($T16*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 16">($T17*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 17">($T18*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 18">($T19*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 19">($T20*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 20">($T21*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 21">($T22*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 22">($T23*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 23">($T24*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 24">($T25*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 25">($T26*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 26">($T27*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 27">($T28*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 28">($T29*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 29">($T30*)mStorage.mBuffer.mCharData</Item>
+     <Item Name="value" Condition="index() == 30">($T31*)mStorage.mBuffer.mCharData</Item>
+   </Expand>
+ </Type>
+
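
For reference, a hypothetical variant and what this visualizer displays for it (type and value are illustrative, not part of the patch):

    eastl::variant<int, float> v;
    v = 3.5f;
    // Watch window: { index=1, value=3.500000 } -- the $T2 (float) alternative
    // read out of mStorage.mBuffer.mCharData. [valueless_by_exception] appears
    // only when a throwing assignment has left the variant without a value.
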
diff --git a/include/EASTL/allocator_malloc.h b/include/EASTL/allocator_malloc.h
index 31f8deca..78f4f69d 100644
--- a/include/EASTL/allocator_malloc.h
+++ b/include/EASTL/allocator_malloc.h
@@ -40,7 +40,7 @@
#endif
#elif defined(EA_PLATFORM_BSD)
#include <stdlib.h>
- #elif defined(EA_COMPILER_CLANG)
+ #elif defined(__clang__)
#if __has_include(<malloc.h>)
#include <malloc.h>
#elif __has_include(<malloc/malloc.h>)
diff --git a/include/EASTL/array.h b/include/EASTL/array.h
index 590aa94b..05d5d32f 100644
--- a/include/EASTL/array.h
+++ b/include/EASTL/array.h
@@ -279,12 +279,6 @@ namespace eastl
template <typename T, size_t N>
EA_CPP14_CONSTEXPR inline typename array<T, N>::reference
array<T, N>::operator[](size_type i)
{
- #if EASTL_ASSERT_ENABLED
- if(EASTL_UNLIKELY(i >= N))
- EASTL_FAIL_MSG("array::operator[] -- out of range");
- #endif
-
- EA_ANALYSIS_ASSUME(i < N);
return mValue[i];
}
@@ -293,13 +287,6 @@ namespace eastl
template <typename T, size_t N>
EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference
array<T, N>::operator[](size_type i) const
{
- #if EASTL_ASSERT_ENABLED
- if(EASTL_UNLIKELY(i >= N))
- EASTL_FAIL_MSG("array::operator[] -- out of range");
-
- #endif
-
- EA_ANALYSIS_ASSUME(i < N);
return mValue[i];
}
@@ -308,11 +295,6 @@ namespace eastl
template <typename T, size_t N>
EA_CPP14_CONSTEXPR inline typename array<T, N>::reference
array<T, N>::front()
{
- #if EASTL_ASSERT_ENABLED
- if(EASTL_UNLIKELY(empty())) // We don't allow the user to reference an empty container.
- EASTL_FAIL_MSG("array::front -- empty array");
- #endif
-
return mValue[0];
}
@@ -321,11 +303,6 @@ namespace eastl
template <typename T, size_t N>
EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference
array<T, N>::front() const
{
- #if EASTL_ASSERT_ENABLED
- if(EASTL_UNLIKELY(empty())) // We don't allow the user to reference an empty container.
- EASTL_FAIL_MSG("array::front -- empty array");
- #endif
-
return mValue[0];
}
@@ -334,11 +311,6 @@ namespace eastl
template <typename T, size_t N>
EA_CPP14_CONSTEXPR inline typename array<T, N>::reference
array<T, N>::back()
{
- #if EASTL_ASSERT_ENABLED
- if(EASTL_UNLIKELY(empty())) // We don't allow the user to reference an empty container.
- EASTL_FAIL_MSG("array::back -- empty array");
- #endif
-
return mValue[N - 1];
}
@@ -347,11 +319,6 @@ namespace eastl
template <typename T, size_t N>
EA_CPP14_CONSTEXPR inline typename array<T, N>::const_reference
array<T, N>::back() const
{
- #if EASTL_ASSERT_ENABLED
- if(EASTL_UNLIKELY(empty())) // We don't allow the user to reference an empty container.
- EASTL_FAIL_MSG("array::back -- empty array");
- #endif
-
return mValue[N - 1];
}
@@ -381,7 +348,6 @@ namespace eastl
EASTL_FAIL_MSG("array::at -- out of range");
#endif
- EA_ANALYSIS_ASSUME(i < N);
return static_cast<reference>(mValue[i]);
}
@@ -397,7 +363,6 @@ namespace eastl
EASTL_FAIL_MSG("array::at -- out of range");
#endif
- EA_ANALYSIS_ASSUME(i < N);
return static_cast<const_reference>(mValue[i]);
}
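
The removals above make operator[], front() and back() unchecked even in asserting builds, while at() keeps its range check. A minimal sketch of the resulting contract (values illustrative):

    eastl::array<int, 4> a = {{ 1, 2, 3, 4 }};
    int x = a[2];    // unchecked: the caller guarantees 2 < a.size()
    int y = a.at(2); // still verified: out-of-range indices hit EASTL_FAIL_MSG
                     // (or throw, when exceptions are enabled)
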
diff --git a/include/EASTL/bit.h b/include/EASTL/bit.h
new file mode 100644
index 00000000..64efe487
--- /dev/null
+++ b/include/EASTL/bit.h
@@ -0,0 +1,65 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_BIT_H
+#define EASTL_BIT_H
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/memory_base.h>
+#include <EASTL/type_traits.h>
+#include <string.h> // memcpy
+
+namespace eastl
+{
+ // eastl::bit_cast
+ // Obtains a value of type To by reinterpreting the object representation of 'from'.
+ // Every bit in the value representation of the returned To object is equal to the
+ // corresponding bit in the object representation of 'from'.
+ //
+ // In order for bit_cast to be constexpr, the compiler needs to explicitly support
+ // it by providing the __builtin_bit_cast builtin. If that builtin is not available,
+ // then we memcpy into aligned storage at runtime and return that instead.
+ //
+ // Both types To and From must be equal in size, and must be trivially copyable.
+
+ #if defined(EASTL_CONSTEXPR_BIT_CAST_SUPPORTED) && EASTL_CONSTEXPR_BIT_CAST_SUPPORTED
+
+ template <typename To, typename From,
+           typename = eastl::enable_if_t<
+               sizeof(To) == sizeof(From)
+               && eastl::is_trivially_copyable<To>::value
+               && eastl::is_trivially_copyable<From>::value
+           >
+ >
+ EA_CONSTEXPR To bit_cast(const From& from) EA_NOEXCEPT
+ {
+ return __builtin_bit_cast(To, from);
+ }
+
+ #else
+
+ template <typename To, typename From,
+           typename = eastl::enable_if_t<
+               sizeof(To) == sizeof(From)
+               && eastl::is_trivially_copyable<To>::value
+               && eastl::is_trivially_copyable<From>::value
+           >
+ >
+ inline To bit_cast(const From& from) EA_NOEXCEPT
+ {
+ typename eastl::aligned_storage<sizeof(To), alignof(To)>::type to;
+ ::memcpy(eastl::addressof(to), eastl::addressof(from), sizeof(To));
+ return reinterpret_cast<To&>(to);
+ }
+
+ #endif // EASTL_CONSTEXPR_BIT_CAST_SUPPORTED
+
+} // namespace eastl
+
+#endif // EASTL_BIT_H
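
A minimal usage sketch of the new header, the classic float-to-bits pun:

    #include <EASTL/bit.h>

    float f = 1.0f;
    uint32_t bits = eastl::bit_cast<uint32_t>(f); // bits == 0x3F800000
    // Usable in constant expressions when EASTL_CONSTEXPR_BIT_CAST_SUPPORTED is 1;
    // otherwise it falls back to the memcpy-based runtime path above.
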
diff --git a/include/EASTL/bitset.h b/include/EASTL/bitset.h
index d9261050..8778372f 100644
--- a/include/EASTL/bitset.h
+++ b/include/EASTL/bitset.h
@@ -1505,7 +1505,7 @@ EA_RESTORE_GCC_WARNING()
inline typename BitsetBase<2, WordType>::size_type
BitsetBase<2, WordType>::count() const
{
- #if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) // GCC 3.4 or later
+ #if (defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304)) || defined(__clang__) // GCC 3.4 or later
#if(EA_PLATFORM_WORD_SIZE == 4)
return (size_type)__builtin_popcountl(mWord[0]) + (size_type)__builtin_popcountl(mWord[1]);
#else
diff --git a/include/EASTL/chrono.h b/include/EASTL/chrono.h
index 453ab0f4..5d8ca425 100644
--- a/include/EASTL/chrono.h
+++ b/include/EASTL/chrono.h
@@ -597,8 +597,7 @@ namespace chrono
timespec ts;
int result = clock_gettime(CLOCK_MONOTONIC, &ts);
- if(result == EINVAL
- )
+ if (result == -1 && errno == EINVAL)
result = clock_gettime(CLOCK_REALTIME, &ts);
const uint64_t nNanoseconds = (uint64_t)ts.tv_nsec + ((uint64_t)ts.tv_sec * UINT64_C(1000000000));
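
The old check compared clock_gettime's return value against EINVAL; POSIX reports failure as -1 with the error code in errno, which is what the corrected condition reads. The fallback pattern in isolation:

    #include <time.h>
    #include <errno.h>

    timespec ts;
    if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1 && errno == EINVAL)
        clock_gettime(CLOCK_REALTIME, &ts); // monotonic clock unsupported; fall back
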
diff --git a/include/EASTL/fixed_hash_map.h b/include/EASTL/fixed_hash_map.h
index af6663dd..b94ea541 100644
--- a/include/EASTL/fixed_hash_map.h
+++ b/include/EASTL/fixed_hash_map.h
@@ -251,7 +251,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -267,11 +267,13 @@ namespace eastl
{
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
- if(!bEnableOverflow)
+ if (!bEnableOverflow)
+ {
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ }
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -288,11 +290,13 @@ namespace eastl
{
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
- if(!bEnableOverflow)
+ if (!bEnableOverflow)
+ {
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ }
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -314,7 +318,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -377,7 +381,7 @@ namespace eastl
mAllocator.copy_overflow_allocator(x.mAllocator);
#if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
+ mAllocator.set_name(x.mAllocator.get_name());
#endif
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
@@ -402,7 +406,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -532,8 +536,10 @@ namespace eastl
{
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
- if(!bEnableOverflow)
+ if (!bEnableOverflow)
+ {
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ }
#if EASTL_NAME_ENABLED
mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
@@ -556,7 +562,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -577,7 +583,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -599,7 +605,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -616,7 +622,7 @@ namespace eastl
mAllocator.copy_overflow_allocator(x.mAllocator);
#if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
+ mAllocator.set_name(x.mAllocator.get_name());
#endif
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
@@ -662,7 +668,7 @@ namespace eastl
mAllocator.copy_overflow_allocator(x.mAllocator);
#if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
+ mAllocator.set_name(x.mAllocator.get_name());
#endif
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
@@ -687,7 +693,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
diff --git a/include/EASTL/fixed_hash_set.h b/include/EASTL/fixed_hash_set.h
index 0db9f49f..fa2783ad 100644
--- a/include/EASTL/fixed_hash_set.h
+++ b/include/EASTL/fixed_hash_set.h
@@ -75,7 +75,7 @@ namespace eastl
bucketCount + 1,
sizeof(typename hash_set<Value, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type),
nodeCount,
- EASTL_ALIGN_OF(Value),
+ EASTL_ALIGN_OF(typename hash_set<Value, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type),
0,
bEnableOverflow,
OverflowAllocator>,
@@ -83,8 +83,9 @@ namespace eastl
{
public:
- typedef fixed_hashtable_allocator<bucketCount + 1, sizeof(typename hash_set<Value, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type), nodeCount, EASTL_ALIGN_OF(Value), 0,
- bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef fixed_hashtable_allocator<bucketCount + 1, sizeof(typename hash_set<Value, Hash, Predicate,
+ OverflowAllocator, bCacheHashCode>::node_type), nodeCount,
+ EASTL_ALIGN_OF(typename hash_set<Value, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type),
+ 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type;
typedef fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator> this_type;
typedef hash_set<Value, Hash, Predicate, fixed_allocator_type, bCacheHashCode> base_type;
@@ -162,7 +163,7 @@ namespace eastl
bucketCount + 1,
sizeof(typename hash_multiset<Value, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type),
nodeCount,
- EASTL_ALIGN_OF(Value),
+ EASTL_ALIGN_OF(typename hash_multiset<Value, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type),
0,
bEnableOverflow,
OverflowAllocator>,
@@ -170,7 +171,8 @@ namespace eastl
{
public:
- typedef fixed_hashtable_allocator<bucketCount + 1, sizeof(typename hash_multiset<Value, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type), nodeCount, EASTL_ALIGN_OF(Value), 0,
+ typedef fixed_hashtable_allocator<bucketCount + 1, sizeof(typename hash_multiset<Value, Hash, Predicate,
+ OverflowAllocator, bCacheHashCode>::node_type), nodeCount, EASTL_ALIGN_OF(typename hash_multiset<Value, Hash, Predicate, OverflowAllocator, bCacheHashCode>::node_type), 0,
bEnableOverflow, OverflowAllocator> fixed_allocator_type;
typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type;
typedef hash_multiset<Value, Hash, Predicate, fixed_allocator_type, bCacheHashCode> base_type;
@@ -238,11 +240,13 @@ namespace eastl
{
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
- if(!bEnableOverflow)
+ if (!bEnableOverflow)
+ {
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ }
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -262,7 +266,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -279,11 +283,13 @@ namespace eastl
{
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
- if(!bEnableOverflow)
+ if (!bEnableOverflow)
+ {
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ }
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -302,10 +308,12 @@ namespace eastl
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
if(!bEnableOverflow)
+ {
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ }
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -322,7 +330,7 @@ namespace eastl
mAllocator.copy_overflow_allocator(x.mAllocator);
#if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
+ mAllocator.set_name(x.mAllocator.get_name());
#endif
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
@@ -344,7 +352,7 @@ namespace eastl
mAllocator.copy_overflow_allocator(x.mAllocator);
#if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
+ mAllocator.set_name(x.mAllocator.get_name());
#endif
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
@@ -366,7 +374,7 @@ namespace eastl
mAllocator.copy_overflow_allocator(x.mAllocator);
#if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
+ mAllocator.set_name(x.mAllocator.get_name());
#endif
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
@@ -391,7 +399,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -515,7 +523,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -535,7 +543,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -556,7 +564,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -578,7 +586,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
@@ -595,7 +603,7 @@ namespace eastl
mAllocator.copy_overflow_allocator(x.mAllocator);
#if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
+ mAllocator.set_name(x.mAllocator.get_name());
#endif
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
@@ -617,7 +625,7 @@ namespace eastl
mAllocator.copy_overflow_allocator(x.mAllocator);
#if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
+ mAllocator.set_name(x.mAllocator.get_name());
#endif
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
@@ -639,7 +647,7 @@ namespace eastl
mAllocator.copy_overflow_allocator(x.mAllocator);
#if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
+ mAllocator.set_name(x.mAllocator.get_name());
#endif
EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
@@ -664,7 +672,7 @@ namespace eastl
base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
#if EASTL_NAME_ENABLED
- mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
+ mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
#endif
mAllocator.reset(mNodeBuffer);
diff --git a/include/EASTL/fixed_list.h b/include/EASTL/fixed_list.h
index 9e48089c..e57c08bf 100644
--- a/include/EASTL/fixed_list.h
+++ b/include/EASTL/fixed_list.h
@@ -63,12 +63,12 @@ namespace eastl
/// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
///
template <typename T, size_t nodeCount, bool bEnableOverflow = true, typename OverflowAllocator = EASTLAllocatorType>
- class fixed_list : public list<T, fixed_node_allocator<sizeof(typename list<T>::node_type),
- nodeCount, EASTL_ALIGN_OF(T), 0, bEnableOverflow, OverflowAllocator> >
+ class fixed_list : public list<T, fixed_node_allocator<sizeof(typename list<T>::node_type),
+ nodeCount, EASTL_ALIGN_OF(typename list<T>::node_type), 0, bEnableOverflow, OverflowAllocator> >
{
public:
- typedef fixed_node_allocator<sizeof(typename list<T>::node_type), nodeCount,
- EASTL_ALIGN_OF(T), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef fixed_node_allocator<sizeof(typename list<T>::node_type), nodeCount,
+ EASTL_ALIGN_OF(typename list<T>::node_type), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
typedef OverflowAllocator overflow_allocator_type;
typedef list<T, fixed_allocator_type> base_type;
typedef fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator> this_type;
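
The motivation, sketched outside the patch: the fixed buffer holds list nodes, not bare T objects, so it must be aligned for the node type. With a small type like char, the old EASTL_ALIGN_OF(T) under-aligned the nodes' prev/next pointers:

    eastl::fixed_list<char, 16> lst; // buffer now aligned for list<char>::node_type, not for char
    lst.push_back('a');
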
diff --git a/include/EASTL/fixed_slist.h b/include/EASTL/fixed_slist.h
index 85a7a7b3..abad7ad9 100644
--- a/include/EASTL/fixed_slist.h
+++ b/include/EASTL/fixed_slist.h
@@ -63,12 +63,12 @@ namespace eastl
/// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
///
template <typename T, size_t nodeCount, bool bEnableOverflow = true, typename OverflowAllocator = EASTLAllocatorType>
- class fixed_slist : public slist<T, fixed_node_allocator<sizeof(typename slist<T>::node_type),
- nodeCount, EASTL_ALIGN_OF(T), 0, bEnableOverflow, OverflowAllocator> >
+ class fixed_slist : public slist<T, fixed_node_allocator<sizeof(typename slist<T>::node_type),
+ nodeCount, EASTL_ALIGN_OF(typename slist<T>::node_type), 0, bEnableOverflow, OverflowAllocator> >
{
public:
- typedef fixed_node_allocator<sizeof(typename slist<T>::node_type), nodeCount,
- EASTL_ALIGN_OF(T), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
+ typedef fixed_node_allocator<sizeof(typename slist<T>::node_type), nodeCount,
+ EASTL_ALIGN_OF(typename slist<T>::node_type), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type;
typedef OverflowAllocator overflow_allocator_type;
typedef slist<T, fixed_allocator_type> base_type;
typedef fixed_slist<T, nodeCount, bEnableOverflow, OverflowAllocator> this_type;
diff --git a/include/EASTL/functional.h b/include/EASTL/functional.h
index 556bf020..6fa34893 100644
--- a/include/EASTL/functional.h
+++ b/include/EASTL/functional.h
@@ -389,52 +389,41 @@ namespace eastl
// Dual type functions
///////////////////////////////////////////////////////////////////////
+
template <typename T, typename U>
struct equal_to_2 : public binary_function<T, U, bool>
{
EA_CPP14_CONSTEXPR bool operator()(const T& a, const U& b) const
{ return a == b; }
- EA_CPP14_CONSTEXPR bool operator()(const U& b, const T& a) const // If you are getting a 'operator() already defined' error related to on this line while compiling a
- { return b == a; } // hashtable class (e.g. hash_map), it's likely that you are using hashtable::find_as when you should
- }; // be using hashtable::find instead. The problem is that the (const T, U) and (const U, T) overloads collide when T == U. To do: make this work.
- template <typename T>
- struct equal_to_2<T, T> : public equal_to<T>
- {
+ template <typename T_ = T, typename U_ = U, typename = eastl::enable_if_t<!eastl::is_same_v<eastl::remove_const_t<T_>, eastl::remove_const_t<U_>>>>
+ EA_CPP14_CONSTEXPR bool operator()(const U& b, const T& a) const
+ { return b == a; }
};
-
template <typename T, typename U>
struct not_equal_to_2 : public binary_function<T, U, bool>
{
EA_CPP14_CONSTEXPR bool operator()(const T& a, const U& b) const
{ return a != b; }
+
+ template <typename T_ = T, typename U_ = U, typename = eastl::enable_if_t<!eastl::is_same_v<eastl::remove_const_t<T_>, eastl::remove_const_t<U_>>>>
EA_CPP14_CONSTEXPR bool operator()(const U& b, const T& a) const
{ return b != a; }
};
- template <typename T>
- struct not_equal_to_2<T, T> : public not_equal_to<T>
- {
- };
-
template <typename T, typename U>
struct less_2 : public binary_function<T, U, bool>
{
EA_CPP14_CONSTEXPR bool operator()(const T& a, const U& b) const
{ return a < b; }
+
+ template <typename T_ = T, typename U_ = U, typename = eastl::enable_if_t<!eastl::is_same_v<eastl::remove_const_t<T_>, eastl::remove_const_t<U_>>>>
EA_CPP14_CONSTEXPR bool operator()(const U& b, const T& a) const
{ return b < a; }
};
- template <typename T>
- struct less_2<T, T> : public less<T>
- {
- };
-
-
-
/// unary_negate
///
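
With the SFINAE constraint, the T == U case no longer needs a dedicated specialization, and heterogeneous use keeps both argument orders. An illustrative sketch:

    eastl::equal_to_2<eastl::string, const char*> eq;
    eastl::string s = "abc";
    eq(s, "abc"); // (T, U) overload
    eq("abc", s); // (U, T) overload, kept because T != U

    eastl::equal_to_2<int, int> eqInt; // reversed overload drops out via SFINAE,
    eqInt(1, 2);                       // so no 'operator() already defined' error
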
diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h b/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h
index c52962eb..44dc991d 100644
--- a/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h
+++ b/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h
@@ -11,7 +11,7 @@
#endif
-#if defined(EA_COMPILER_MSVC)
+#if defined(EA_COMPILER_MSVC) && !defined(EA_COMPILER_CLANG_CL)
#if defined(EA_PROCESSOR_ARM32)
@@ -46,7 +46,7 @@
EASTL_ATOMIC_COMPILER_BARRIER()
-#elif defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)
+#elif defined(EA_COMPILER_GNUC) || defined(__clang__)
#define EASTL_ARM_DMB_ISH ish
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86.h b/include/EASTL/internal/atomic/arch/x86/arch_x86.h
index 5087c133..142a5143 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86.h
@@ -32,23 +32,14 @@
/////////////////////////////////////////////////////////////////////////////////
-
-#if defined(EA_COMPILER_MSVC)
-
+#if (defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)
+ #define EASTL_ARCH_ATOMIC_HAS_128BIT
+#elif defined(EA_COMPILER_MSVC)
#if EA_PLATFORM_PTR_SIZE == 8
#define EASTL_ARCH_ATOMIC_HAS_128BIT
#endif
-
#endif
-
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
-
- #define EASTL_ARCH_ATOMIC_HAS_128BIT
-
-#endif
-
-
/////////////////////////////////////////////////////////////////////////////////
@@ -104,7 +95,7 @@
* SSE 128-bit loads are not guaranteed to be atomic even though some CPUs
* make them atomic such as AMD Ryzen or Intel SandyBridge.
*/
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h
index 4534806d..7b77528e 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h
@@ -54,7 +54,7 @@
#endif
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h
index c38ba414..05831636 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h
@@ -54,7 +54,7 @@
#endif
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h
index e028398a..1968e9ab 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h
@@ -15,7 +15,7 @@
//
// void EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
//
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h
index f8b956a3..61a126c1 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h
@@ -15,7 +15,7 @@
//
// void EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
//
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h
index 0f058004..624d2f55 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h
@@ -51,7 +51,7 @@
#endif
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, MemoryOrder) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h
index d78b3334..e816af9b 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h
@@ -51,7 +51,7 @@
#endif
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED(ret, observed, val) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h
index fd7dbb9c..ff27b1a2 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h
@@ -51,7 +51,7 @@
#endif
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED(ret, observed, val) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h
index 50da6db7..8627d3a2 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h
@@ -51,7 +51,7 @@
#endif
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED(ret, observed, val) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h
index 77bee83b..14b43f90 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h
@@ -51,7 +51,7 @@
#endif
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED(ret, observed, val) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h
index 2e76b0c5..666df8bf 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h
@@ -51,7 +51,7 @@
#endif
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED(ret, observed, val) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h
index b0441903..644a2a17 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h
@@ -15,7 +15,46 @@
//
// void EASTL_ARCH_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
//
-#if defined(EA_COMPILER_MSVC)
+
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ /**
+ * NOTE:
+ *
+ * Since the cmpxchg 128-bit inline assembly does a sete in the asm to set the return boolean,
+ * it doesn't get dead-store removed even though we don't care about the success of the
+ * cmpxchg since the compiler cannot reason about what is inside asm blocks.
+ * Thus this variant just does the minimum required to do an atomic load.
+ */
+#define EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, MemoryOrder) \
+ { \
+ EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 expected = 0; \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, expected); \
+ \
+ /* Compare RDX:RAX with m128. If equal, set ZF and load RCX:RBX into m128. Else, clear ZF and load m128 into RDX:RAX. */ \
+ __asm__ __volatile__ ("lock; cmpxchg16b %2" /* cmpxchg16b sets/clears ZF */ \
+ /* Output Operands */ \
+ : "=a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "=d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]), \
+ "+m"(*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__uint128_t, (ptr)))) \
+ /* Input Operands */ \
+ : "b"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "c"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]), \
+ "a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]) \
+ /* Clobbers */ \
+ : "memory", "cc"); \
+ }
+
+
+#define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, RELAXED)
+
+#define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, ACQUIRE)
+
+#define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, SEQ_CST)
+
+#elif defined(EA_COMPILER_MSVC)
#if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1920) // >= VS2019
@@ -119,49 +158,6 @@
#define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \
EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, SEQ_CST)
-
-#endif
-
-
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
-
-
- /**
- * NOTE:
- *
- * Since the cmpxchg 128-bit inline assembly does a sete in the asm to set the return boolean,
- * it doesn't get dead-store removed even though we don't care about the success of the
- * cmpxchg since the compiler cannot reason about what is inside asm blocks.
- * Thus this variant just does the minimum required to do an atomic load.
- */
- #define EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, MemoryOrder) \
- { \
- EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 expected = 0; \
- ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, expected); \
- \
- /* Compare RDX:RAX with m128. If equal, set ZF and load RCX:RBX into m128. Else, clear ZF and load m128 into RDX:RAX. */ \
- __asm__ __volatile__ ("lock; cmpxchg16b %2" /* cmpxchg16b sets/clears ZF */ \
- /* Output Operands */ \
- : "=a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "=d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]), \
- "+m"(*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__uint128_t, (ptr)))) \
- /* Input Operands */ \
- : "b"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "c"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]), \
- "a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]) \
- /* Clobbers */ \
- : "memory", "cc"); \
- }
-
-
- #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \
- EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, RELAXED)
-
- #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \
- EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, ACQUIRE)
-
- #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \
- EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, SEQ_CST)
-
-
#endif
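
The practical effect of the reordering: a compiler defining both _MSC_VER and __clang__ (clang-cl) now takes the inline-assembly path instead of the MSVC-intrinsic path. A usage sketch, assuming a 16-byte trivially copyable struct (the type is illustrative):

    struct alignas(16) Pair { uint64_t lo; uint64_t hi; };
    eastl::atomic<Pair> a;
    Pair snapshot = a.load(eastl::memory_order_relaxed); // lowers to lock cmpxchg16b on x64 gcc/clang
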
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h
index 1d1c8fca..7bad141f 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h
@@ -46,7 +46,7 @@
#endif
-#elif defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)
+#elif defined(__clang__) || defined(EA_COMPILER_GNUC)
/**
* NOTE:
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h
index 751cc2a3..42f7d61f 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h
@@ -54,7 +54,7 @@
#endif
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h
index 397ff5f8..31655c3b 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h
@@ -145,7 +145,7 @@
#endif
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, MemoryOrder) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h
index 124b586d..a1d09329 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h
@@ -54,7 +54,7 @@
#endif
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h
index fe3bd58c..183c7f3a 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h
@@ -31,7 +31,7 @@
#endif
-#if defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)
+#if defined(EA_COMPILER_MSVC) || defined(__clang__) || defined(EA_COMPILER_GNUC)
#define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST() \
EASTL_ATOMIC_CPU_MB()
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h
index 28cb9587..a5b62c3b 100644
--- a/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h
@@ -54,7 +54,7 @@
#endif
-#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
#define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
diff --git a/include/EASTL/internal/atomic/atomic.h b/include/EASTL/internal/atomic/atomic.h
index e1c5286e..eb27d2d9 100644
--- a/include/EASTL/internal/atomic/atomic.h
+++ b/include/EASTL/internal/atomic/atomic.h
@@ -62,7 +62,7 @@ namespace internal
template <typename T>
struct is_atomic_lockfree_size
{
- static EASTL_CPP17_INLINE_VARIABLE constexpr bool value = false ||
+ static EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR_OR_CONST bool value = false ||
#if defined(EASTL_ATOMIC_HAS_8BIT)
sizeof(T) == 1 ||
#endif
@@ -85,7 +85,7 @@ namespace internal
template <typename T>
struct is_user_type_suitable_for_primary_template
{
- static EASTL_CPP17_INLINE_VARIABLE constexpr bool value = eastl::internal::is_atomic_lockfree_size<T>::value;
+ static EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR_OR_CONST bool value = eastl::internal::is_atomic_lockfree_size<T>::value;
};
@@ -116,7 +116,7 @@ namespace internal
\
public: \
\
- static EASTL_CPP17_INLINE_VARIABLE constexpr bool is_always_lock_free = eastl::internal::is_atomic_lockfree_size<T>::value; \
+ static EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR_OR_CONST bool is_always_lock_free = eastl::internal::is_atomic_lockfree_size<T>::value; \
\
public: /* deleted ctors && assignment operators */ \
\
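
EA_CONSTEXPR_OR_CONST keeps these flags usable as compile-time constants on toolchains without full constexpr support. A quick compile-time probe (illustrative):

    static_assert(eastl::atomic<uint32_t>::is_always_lock_free, "expected lock-free 32-bit atomics");
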
diff --git a/include/EASTL/internal/atomic/atomic_flag.h b/include/EASTL/internal/atomic/atomic_flag.h
index e135d612..eed448ae 100644
--- a/include/EASTL/internal/atomic/atomic_flag.h
+++ b/include/EASTL/internal/atomic/atomic_flag.h
@@ -42,13 +42,13 @@ class atomic_flag
public: /* clear */
template <typename Order>
- void clear(Order order) volatile EA_NOEXCEPT
+ void clear(Order /*order*/) volatile EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order);
}
template <typename Order>
- void clear(Order order) EA_NOEXCEPT
+ void clear(Order /*order*/) EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
}
@@ -76,14 +76,14 @@ class atomic_flag
public: /* test_and_set */
template <typename Order>
- bool test_and_set(Order order) volatile EA_NOEXCEPT
+ bool test_and_set(Order /*order*/) volatile EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order);
return false;
}
template <typename Order>
- bool test_and_set(Order order) EA_NOEXCEPT
+ bool test_and_set(Order /*order*/) EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
return false;
@@ -122,14 +122,14 @@ class atomic_flag
public: /* test */
template <typename Order>
- bool test(Order order) const volatile EA_NOEXCEPT
+ bool test(Order /*order*/) const volatile EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order);
return false;
}
template <typename Order>
- bool test(Order order) const EA_NOEXCEPT
+ bool test(Order /*order*/) const EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
return false;
diff --git a/include/EASTL/internal/atomic/atomic_integral.h b/include/EASTL/internal/atomic/atomic_integral.h
index 7c94db32..bcf7c178 100644
--- a/include/EASTL/internal/atomic/atomic_integral.h
+++ b/include/EASTL/internal/atomic/atomic_integral.h
@@ -24,18 +24,18 @@ namespace internal
#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(funcName) \
template <typename Order> \
- T funcName(T arg, Order order) EA_NOEXCEPT \
+ T funcName(T /*arg*/, Order /*order*/) EA_NOEXCEPT \
{ \
EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
} \
\
template <typename Order> \
- T funcName(T arg, Order order) volatile EA_NOEXCEPT \
+ T funcName(T /*arg*/, Order /*order*/) volatile EA_NOEXCEPT \
{ \
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
} \
\
- T funcName(T arg) volatile EA_NOEXCEPT \
+ T funcName(T /*arg*/) volatile EA_NOEXCEPT \
{ \
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
}
@@ -54,7 +54,7 @@ namespace internal
#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(operatorOp) \
- T operator operatorOp(T arg) volatile EA_NOEXCEPT \
+ T operator operatorOp(T /*arg*/) volatile EA_NOEXCEPT \
{ \
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
}
diff --git a/include/EASTL/internal/atomic/atomic_memory_order.h b/include/EASTL/internal/atomic/atomic_memory_order.h
index b1c14035..1564d87d 100644
--- a/include/EASTL/internal/atomic/atomic_memory_order.h
+++ b/include/EASTL/internal/atomic/atomic_memory_order.h
@@ -30,12 +30,12 @@ struct memory_order_seq_cst_s {};
} // namespace internal
-EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_relaxed = internal::memory_order_relaxed_s{};
-EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_read_depends = internal::memory_order_read_depends_s{};
-EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_acquire = internal::memory_order_acquire_s{};
-EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_release = internal::memory_order_release_s{};
-EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_acq_rel = internal::memory_order_acq_rel_s{};
-EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_seq_cst = internal::memory_order_seq_cst_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_relaxed = internal::memory_order_relaxed_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_read_depends = internal::memory_order_read_depends_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_acquire = internal::memory_order_acquire_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_release = internal::memory_order_release_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_acq_rel = internal::memory_order_acq_rel_s{};
+EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR auto memory_order_seq_cst = internal::memory_order_seq_cst_s{};
} // namespace eastl
diff --git a/include/EASTL/internal/atomic/atomic_pointer.h b/include/EASTL/internal/atomic/atomic_pointer.h
index 18f6691c..c0b19e66 100644
--- a/include/EASTL/internal/atomic/atomic_pointer.h
+++ b/include/EASTL/internal/atomic/atomic_pointer.h
@@ -27,18 +27,18 @@ namespace internal
#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(funcName) \
template <typename Order> \
- T* funcName(ptrdiff_t arg, Order order) EA_NOEXCEPT \
+ T* funcName(ptrdiff_t /*arg*/, Order /*order*/) EA_NOEXCEPT \
{ \
EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
} \
\
template <typename Order> \
- T* funcName(ptrdiff_t arg, Order order) volatile EA_NOEXCEPT \
+ T* funcName(ptrdiff_t /*arg*/, Order /*order*/) volatile EA_NOEXCEPT \
{ \
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
} \
\
- T* funcName(ptrdiff_t arg) volatile EA_NOEXCEPT \
+ T* funcName(ptrdiff_t /*arg*/) volatile EA_NOEXCEPT \
{ \
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
}
@@ -55,7 +55,7 @@ namespace internal
}
#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(operatorOp) \
- T* operator operatorOp(ptrdiff_t arg) volatile EA_NOEXCEPT \
+ T* operator operatorOp(ptrdiff_t /*arg*/) volatile EA_NOEXCEPT \
{ \
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
}
diff --git a/include/EASTL/internal/atomic/atomic_size_aligned.h b/include/EASTL/internal/atomic/atomic_size_aligned.h
index db23e478..f5033758 100644
--- a/include/EASTL/internal/atomic/atomic_size_aligned.h
+++ b/include/EASTL/internal/atomic/atomic_size_aligned.h
@@ -24,40 +24,40 @@ namespace internal
#define EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_IMPL(funcName) \
template <typename OrderSuccess, typename OrderFailure> \
- bool funcName(T& expected, T desired, \
- OrderSuccess orderSuccess, \
- OrderFailure orderFailure) EA_NOEXCEPT \
+ bool funcName(T& /*expected*/, T /*desired*/, \
+ OrderSuccess /*orderSuccess*/, \
+ OrderFailure /*orderFailure*/) EA_NOEXCEPT \
{ \
EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
return false; \
} \
\
template <typename OrderSuccess, typename OrderFailure> \
- bool funcName(T& expected, T desired, \
- OrderSuccess orderSuccess, \
- OrderFailure orderFailure) volatile EA_NOEXCEPT \
+ bool funcName(T& /*expected*/, T /*desired*/, \
+ OrderSuccess /*orderSuccess*/, \
+ OrderFailure /*orderFailure*/) volatile EA_NOEXCEPT \
{ \
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
return false; \
} \
\
template <typename Order> \
- bool funcName(T& expected, T desired, \
- Order order) EA_NOEXCEPT \
+ bool funcName(T& /*expected*/, T /*desired*/, \
+ Order /*order*/) EA_NOEXCEPT \
{ \
EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
return false; \
} \
\
template <typename Order> \
- bool funcName(T& expected, T desired, \
- Order order) volatile EA_NOEXCEPT \
+ bool funcName(T& /*expected*/, T /*desired*/, \
+ Order /*order*/) volatile EA_NOEXCEPT \
{ \
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
return false; \
} \
\
- bool funcName(T& expected, T desired) volatile EA_NOEXCEPT \
+ bool funcName(T& /*expected*/, T /*desired*/) volatile EA_NOEXCEPT \
{ \
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
return false; \
@@ -90,18 +90,18 @@ namespace internal
public: /* store */
template <typename Order>
- void store(T desired, Order order) EA_NOEXCEPT
+ void store(T /*desired*/, Order /*order*/) EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T);
}
template <typename Order>
- void store(T desired, Order order) volatile EA_NOEXCEPT
+ void store(T /*desired*/, Order /*order*/) volatile EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
}
- void store(T desired) volatile EA_NOEXCEPT
+ void store(T /*desired*/) volatile EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
}
@@ -109,13 +109,13 @@ namespace internal
public: /* load */
template <typename Order>
- T load(Order order) const EA_NOEXCEPT
+ T load(Order /*order*/) const EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T);
}
template <typename Order>
- T load(Order order) const volatile EA_NOEXCEPT
+ T load(Order /*order*/) const volatile EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
}
@@ -128,18 +128,18 @@ namespace internal
public: /* exchange */
template <typename Order>
- T exchange(T desired, Order order) EA_NOEXCEPT
+ T exchange(T /*desired*/, Order /*order*/) EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T);
}
template <typename Order>
- T exchange(T desired, Order order) volatile EA_NOEXCEPT
+ T exchange(T /*desired*/, Order /*order*/) volatile EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
}
- T exchange(T desired) volatile EA_NOEXCEPT
+ T exchange(T /*desired*/) volatile EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
}
@@ -154,7 +154,7 @@ namespace internal
public: /* assignment operator */
- T operator=(T desired) volatile EA_NOEXCEPT
+ T operator=(T /*desired*/) volatile EA_NOEXCEPT
{
EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
}
diff --git a/include/EASTL/internal/atomic/compiler/compiler.h b/include/EASTL/internal/atomic/compiler/compiler.h
index 65a4cd00..fc128795 100644
--- a/include/EASTL/internal/atomic/compiler/compiler.h
+++ b/include/EASTL/internal/atomic/compiler/compiler.h
@@ -15,7 +15,7 @@
//
// Include the compiler specific implementations
//
-#if defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)
+#if defined(EA_COMPILER_GNUC) || defined(__clang__)
#include "gcc/compiler_gcc.h"
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h
index 02e2d03a..90b78a65 100644
--- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h
@@ -16,7 +16,9 @@
// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER()
//
#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() \
- _ReadWriteBarrier()
+ EA_DISABLE_CLANG_WARNING(-Wdeprecated-declarations) \
+ _ReadWriteBarrier() \
+ EA_RESTORE_CLANG_WARNING()
/////////////////////////////////////////////////////////////////////////////////
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h
index 42117a1a..8217f232 100644
--- a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h
@@ -10,7 +10,6 @@
#pragma once
#endif
-
#if defined(EA_PROCESSOR_X86_64)
#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_8 _InterlockedCompareExchange8
diff --git a/include/EASTL/internal/config.h b/include/EASTL/internal/config.h
index 530bbc87..8dc14202 100644
--- a/include/EASTL/internal/config.h
+++ b/include/EASTL/internal/config.h
@@ -89,8 +89,8 @@
///////////////////////////////////////////////////////////////////////////////
#ifndef EASTL_VERSION
- #define EASTL_VERSION "3.17.06"
- #define EASTL_VERSION_N 31706
+ #define EASTL_VERSION "3.18.00"
+ #define EASTL_VERSION_N 31800
#endif
@@ -864,7 +864,7 @@ namespace eastl
#if EASTL_INT128_SUPPORTED
#define EASTL_INT128_DEFINED 1
- #if defined(__SIZEOF_INT128__) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)
+ #if defined(__SIZEOF_INT128__) || defined(EA_COMPILER_GNUC) || defined(__clang__)
typedef __int128_t eastl_int128_t;
typedef __uint128_t eastl_uint128_t;
#else
@@ -1274,7 +1274,7 @@ namespace eastl
// useful macro identifier for our type traits implementation.
//
#ifndef EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE
- #if defined(_MSC_VER) && (_MSC_VER >= 1500) // VS2008 or later
+ #if defined(_MSC_VER) && (_MSC_VER >= 1500) && !defined(EA_COMPILER_CLANG_CL) // VS2008 or later
#pragma warning(push, 0)
#include <yvals.h>
#pragma warning(pop)
@@ -1283,9 +1283,9 @@ namespace eastl
#else
#define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0
#endif
- #elif defined(EA_COMPILER_CLANG) && defined(__APPLE__) && defined(_CXXCONFIG) // Apple clang but with GCC's libstdc++.
+ #elif defined(__clang__) && defined(__APPLE__) && defined(_CXXCONFIG) // Apple clang but with GCC's libstdc++.
#define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0
- #elif defined(EA_COMPILER_CLANG)
+ #elif defined(__clang__)
#define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
#elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003) && !defined(__GCCXML__)
#define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
@@ -1836,14 +1836,14 @@ typedef EASTL_SSIZE_T eastl_ssize_t; // Signed version of eastl_size_t. Concept
/// EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE
-#if defined(_MSC_VER) && (_MSC_VER >= 1913) // VS2017+
- #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 1
-#elif defined(EA_COMPILER_CLANG)
+#if defined(__clang__)
#if !__is_identifier(__has_unique_object_representations)
#define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 1
#else
#define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 0
#endif
+#elif defined(_MSC_VER) && (_MSC_VER >= 1913) // VS2017+
+ #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 1
#else
#define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 0
#endif
@@ -1873,5 +1873,17 @@ typedef EASTL_SSIZE_T eastl_ssize_t; // Signed version of eastl_size_t. Concept
#define EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT(...)
#endif
+/// EASTL_CONSTEXPR_BIT_CAST_SUPPORTED
+/// eastl::bit_cast, in order to be implemented as constexpr, requires explicit compiler support.
+/// This macro defines whether it's possible for bit_cast to be constexpr.
+///
+#if (defined(EA_COMPILER_MSVC) && defined(EA_COMPILER_MSVC_VERSION_14_26) && EA_COMPILER_VERSION >= EA_COMPILER_MSVC_VERSION_14_26) \
+ || EA_COMPILER_HAS_BUILTIN(__builtin_bit_cast)
+ #define EASTL_CONSTEXPR_BIT_CAST_SUPPORTED 1
+#else
+ #define EASTL_CONSTEXPR_BIT_CAST_SUPPORTED 0
+#endif
+
+
#endif // Header include guard
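
A sketch of what the new macro gates (hypothetical constant, assuming <EASTL/bit.h> is included):

    #if EASTL_CONSTEXPR_BIT_CAST_SUPPORTED
        EA_CONSTEXPR uint32_t kOneBits = eastl::bit_cast<uint32_t>(1.0f); // folded at compile time
    #else
        const uint32_t kOneBits = eastl::bit_cast<uint32_t>(1.0f);        // memcpy-based at runtime
    #endif
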
diff --git a/include/EASTL/internal/copy_help.h b/include/EASTL/internal/copy_help.h
index e5fb2abd..67b5d876 100644
--- a/include/EASTL/internal/copy_help.h
+++ b/include/EASTL/internal/copy_help.h
@@ -6,12 +6,12 @@
#ifndef EASTL_INTERNAL_COPY_HELP_H
#define EASTL_INTERNAL_COPY_HELP_H
+#include <EASTL/internal/config.h>
#if defined(EA_PRAGMA_ONCE_SUPPORTED)
#pragma once
#endif
-#include <EASTL/internal/config.h>
#include <EASTL/type_traits.h>
#include <EASTL/iterator.h>
#include <string.h> // memcpy, memcmp, memmove
@@ -19,15 +19,15 @@
namespace eastl
{
- /// move / move_n / move_backward
+ /// move / move_n / move_backward
/// copy / copy_n / copy_backward
///
/// We want to optimize move, move_n, move_backward, copy, copy_backward, copy_n to do memmove operations
- /// when possible.
+ /// when possible.
///
- /// We could possibly use memcpy, though it has stricter overlap requirements than the move and copy
- /// algorithms and would require a runtime if/else to choose it over memmove. In particular, memcpy
- /// allows no range overlap at all, whereas move/copy allow output end overlap and move_backward/copy_backward
+ /// We could possibly use memcpy, though it has stricter overlap requirements than the move and copy
+ /// algorithms and would require a runtime if/else to choose it over memmove. In particular, memcpy
+ /// allows no range overlap at all, whereas move/copy allow output end overlap and move_backward/copy_backward
/// allow output begin overlap. Despite this it might be useful to use memcpy for any platforms where
/// memcpy is significantly faster than memmove, and since in most cases the copy/move operation in fact
/// doesn't target overlapping memory and so memcpy would be usable.
@@ -36,13 +36,13 @@ namespace eastl
/// InputIterator and OutputIterator are of the same type.
/// InputIterator and OutputIterator are of type contiguous_iterator_tag or simply are pointers (the two are virtually synonymous).
/// is_trivially_copyable<T>::value is true. i.e. the constructor T(const T& t) (or T(T&& t) if present) can be replaced by memmove(this, &t, sizeof(T))
- ///
- /// copy normally differs from move, but there is a case where copy is the same as move: when copy is
- /// used with a move_iterator. We handle that case here by detecting that copy is being done with a
+ ///
+ /// copy normally differs from move, but there is a case where copy is the same as move: when copy is
+ /// used with a move_iterator. We handle that case here by detecting that copy is being done with a
/// move_iterator and redirect it to move (which can take advantage of memmove/memcpy).
///
- /// The generic_iterator class is typically used for wrapping raw memory pointers so they can act like
- /// formal iterators. Since pointers provide an opportunity for memmove/memcpy operations, we can
+ /// The generic_iterator class is typically used for wrapping raw memory pointers so they can act like
+ /// formal iterators. Since pointers provide an opportunity for memmove/memcpy operations, we can
/// detect a generic iterator and use it's wrapped type as a pointer if it happens to be one.
// Implementation moving copying both trivial and non-trivial data via a lesser iterator than random-access.
@@ -61,7 +61,7 @@ namespace eastl
// Specialization for copying non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when it's a compile-time const.
// This specialization converts the random access InputIterator last-first to an integral type. There's no simple way for us to take advantage of a random access output iterator,
// as the range is specified by the input instead of the output, and distance(first, last) for a non-random-access iterator is potentially slow.
- template <>
+ template <>
struct move_and_copy_helper<EASTL_ITC_NS::random_access_iterator_tag, false, false>
{
template <typename InputIterator, typename OutputIterator>
@@ -88,7 +88,7 @@ namespace eastl
return result;
}
};
-
+
// Specialization for moving non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when it's a compile-time const.
template <>
struct move_and_copy_helper<EASTL_ITC_NS::random_access_iterator_tag, true, false>
@@ -130,9 +130,9 @@ namespace eastl
	typedef typename eastl::iterator_traits<InputIterator>::value_type  value_type_input;
	typedef typename eastl::iterator_traits<OutputIterator>::value_type value_type_output;
-	const bool canBeMemmoved = eastl::is_trivially_copyable<value_type_output>::value &&
-	                           eastl::is_same<value_type_input, value_type_output>::value &&
-	                          (eastl::is_pointer<InputIterator>::value  || eastl::is_same<IIC, eastl::contiguous_iterator_tag>::value) &&
+	const bool canBeMemmoved = eastl::is_trivially_copyable<value_type_output>::value &&
+	                           eastl::is_same<value_type_input, value_type_output>::value &&
+	                          (eastl::is_pointer<InputIterator>::value  || eastl::is_same<IIC, eastl::contiguous_iterator_tag>::value) &&
	                          (eastl::is_pointer<OutputIterator>::value || eastl::is_same<OIC, eastl::contiguous_iterator_tag>::value);
	return eastl::move_and_copy_helper<IIC, isMove, canBeMemmoved>::move_or_copy(first, last, result); // Need to choose based on the input iterator tag and not the output iterator tag, because containers accept input ranges of iterator types different than self.
@@ -149,11 +149,11 @@ namespace eastl
/// move
///
- /// After this operation the elements in the moved-from range will still contain valid values of the
- /// appropriate type, but not necessarily the same values as before the move.
+ /// After this operation the elements in the moved-from range will still contain valid values of the
+ /// appropriate type, but not necessarily the same values as before the move.
/// Returns the end of the result range.
/// Note: When moving between containers, the dest range must be valid; this function doesn't resize containers.
- /// Note: if result is within [first, last), move_backward must be used instead of move.
+ /// Note: if result is within [first, last), move_backward must be used instead of move.
///
/// Example usage:
/// eastl::move(myArray.begin(), myArray.end(), myDestArray.begin());
@@ -180,7 +180,7 @@ namespace eastl
/// starting from first and proceeding to last. For each nonnegative integer n < (last - first),
/// performs *(result + n) = *(first + n).
///
- /// Returns: result + (last - first). That is, returns the end of the result. Note that this
+ /// Returns: result + (last - first). That is, returns the end of the result. Note that this
/// is different from how memmove/memcpy work, as they return the beginning of the result.
///
/// Requires: result shall not be in the range [first, last). But the end of the result range
@@ -197,19 +197,4 @@ namespace eastl
}
} // namespace eastl
-#endif // Header include guard
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+#endif // EASTL_INTERNAL_COPY_HELP_H
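The comments in this file describe the dispatch that follows: when both iterators are raw pointers (or contiguous) and the element type is trivially copyable, the element loop collapses to one memmove. A self-contained sketch of that decision, using standard type traits rather than EASTL's internal helper (copy_fast is a hypothetical name):

    #include <string.h>      // memmove
    #include <type_traits>

    template <typename T, typename U>
    U* copy_fast(T* first, T* last, U* result)
    {
    	if constexpr (std::is_same_v<std::remove_const_t<T>, U> && std::is_trivially_copyable_v<U>)
    	{
    		// memmove tolerates the overlap patterns copy/move permit, unlike memcpy.
    		const size_t n = static_cast<size_t>(last - first);
    		memmove(result, first, n * sizeof(U));
    		return result + n;
    	}
    	else
    	{
    		for (; first != last; ++first, ++result)
    			*result = *first; // non-trivial types need real assignments
    		return result;
    	}
    }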
diff --git a/include/EASTL/internal/fill_help.h b/include/EASTL/internal/fill_help.h
index 235a24ee..07e3b62d 100644
--- a/include/EASTL/internal/fill_help.h
+++ b/include/EASTL/internal/fill_help.h
@@ -85,7 +85,7 @@ namespace eastl
}
- #if(defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+ #if (defined(EA_COMPILER_GNUC) || defined(__clang__)) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
#if defined(EA_PROCESSOR_X86_64)
template <typename Value>
inline void fill(uint64_t* first, uint64_t* last, Value c)
@@ -327,7 +327,7 @@ namespace eastl
}
#endif
- #if(defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+ #if (defined(EA_COMPILER_GNUC) || defined(__clang__)) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
#if defined(EA_PROCESSOR_X86_64)
template <typename Value>
inline uint64_t* fill_n(uint64_t* first, Size n, Value c)
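The guard above now keys on __clang__, which the compiler defines itself, so these x86/x64 specializations are also selected for clang-based front ends that EABase may not classify as EA_COMPILER_CLANG. For byte-sized elements the same idea reduces to memset; a sketch (fill_bytes is hypothetical, and the real uint32_t/uint64_t specializations use compiler-specific code, hence the guard):

    #include <string.h> // memset
    #include <stdint.h>

    inline void fill_bytes(uint8_t* first, uint8_t* last, uint8_t value)
    {
    	// One library call instead of a per-element store loop.
    	memset(first, value, static_cast<size_t>(last - first));
    }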
diff --git a/include/EASTL/internal/fixed_pool.h b/include/EASTL/internal/fixed_pool.h
index 5a380046..4d710354 100644
--- a/include/EASTL/internal/fixed_pool.h
+++ b/include/EASTL/internal/fixed_pool.h
@@ -1362,12 +1362,11 @@ namespace eastl
{
}
- // Disabled because the default is sufficient.
- //fixed_vector_allocator(const fixed_vector_allocator& x)
- //{
- // mpPoolBegin = x.mpPoolBegin;
- // mOverflowAllocator = x.mOverflowAllocator;
- //}
+ fixed_vector_allocator(const fixed_vector_allocator& x)
+ {
+ mpPoolBegin = x.mpPoolBegin;
+ mOverflowAllocator = x.mOverflowAllocator;
+ }
fixed_vector_allocator& operator=(const fixed_vector_allocator& x)
{
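A likely rationale for re-enabling the copy constructor (an assumption; the diff itself does not state one): once a class user-declares operator=, relying on the implicitly generated copy constructor is deprecated, and warnings such as -Wdeprecated-copy flag it. A reduced illustration:

    struct HasAssign
    {
    	int value = 0;

    	HasAssign() = default;
    	HasAssign& operator=(const HasAssign& x) { value = x.value; return *this; }

    	// Spelled out so `HasAssign b(a);` does not rely on the deprecated
    	// implicitly generated copy constructor.
    	HasAssign(const HasAssign& x) : value(x.value) {}
    };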
diff --git a/include/EASTL/internal/function.h b/include/EASTL/internal/function.h
index 6e857f0b..785969d2 100644
--- a/include/EASTL/internal/function.h
+++ b/include/EASTL/internal/function.h
@@ -5,6 +5,8 @@
#ifndef EASTL_FUNCTION_H
#define EASTL_FUNCTION_H
+#include <EASTL/internal/config.h>
+
#if defined(EA_PRAGMA_ONCE_SUPPORTED)
#pragma once
#endif
diff --git a/include/EASTL/internal/function_detail.h b/include/EASTL/internal/function_detail.h
index dc18b631..3ee36677 100644
--- a/include/EASTL/internal/function_detail.h
+++ b/include/EASTL/internal/function_detail.h
@@ -95,7 +95,7 @@ namespace eastl
	template <typename Functor, size_t SizeInBytes>
	struct is_functor_inplace_allocatable
	{
-		static constexpr bool value =
+		static EA_CONSTEXPR bool value =
			sizeof(Functor) <= sizeof(functor_storage<SizeInBytes>) &&
			(eastl::alignment_of_v<functor_storage<SizeInBytes>> % eastl::alignment_of_v<Functor>) == 0;
};
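A standalone sketch of the inplace-allocatability test above, using standard traits in place of EASTL's (fits_inplace is a hypothetical name): a callable is stored in the small buffer only if it fits and the buffer's alignment is a multiple of the callable's.

    #include <stddef.h>
    #include <type_traits>

    template <typename Functor, size_t SizeInBytes>
    struct fits_inplace
    {
    	using storage = std::aligned_storage_t<SizeInBytes>;

    	static constexpr bool value =
    		sizeof(Functor) <= sizeof(storage) &&
    		(alignof(storage) % alignof(Functor)) == 0; // buffer alignment must cover the functor's
    };

    static_assert(fits_inplace<int (*)(int), 32>::value, "a function pointer fits the inline buffer");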
diff --git a/include/EASTL/internal/functional_base.h b/include/EASTL/internal/functional_base.h
index a7d2dc91..ef27800b 100644
--- a/include/EASTL/internal/functional_base.h
+++ b/include/EASTL/internal/functional_base.h
@@ -6,21 +6,23 @@
#ifndef EASTL_INTERNAL_FUNCTIONAL_BASE_H
#define EASTL_INTERNAL_FUNCTIONAL_BASE_H
+#include <EASTL/internal/config.h>
+
#if defined(EA_PRAGMA_ONCE_SUPPORTED)
#pragma once
#endif
-#include <EASTL/internal/config.h>
#include <EASTL/internal/memory_base.h>
#include <EASTL/internal/move_help.h>
#include <EASTL/type_traits.h>
+
namespace eastl
{
	// forward declaration for swap
	template <typename T>
	inline void swap(T& a, T& b)
-		EA_NOEXCEPT_IF(eastl::is_nothrow_move_constructible<T>::value&& eastl::is_nothrow_move_assignable<T>::value);
+		EA_NOEXCEPT_IF(eastl::is_nothrow_move_constructible<T>::value && eastl::is_nothrow_move_assignable<T>::value);
/// invoke
@@ -39,44 +41,47 @@ namespace eastl
/// http://en.cppreference.com/w/cpp/utility/functional/invoke
///
	template <typename R, typename C, typename T, typename... Args>
-	auto invoke_impl(R C::*func, T&& obj, Args&&... args) ->
-		typename enable_if<is_base_of<C, decay_t<T>>::value,
+	EA_CONSTEXPR auto invoke_impl(R C::*func, T&& obj, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR((eastl::forward<T>(obj).*func)(eastl::forward<Args>(args)...)))
+		-> typename enable_if<is_base_of<C, decay_t<T>>::value,
		                   decltype((eastl::forward<T>(obj).*func)(eastl::forward<Args>(args)...))>::type
	{
		return (eastl::forward<T>(obj).*func)(eastl::forward<Args>(args)...);
	}

	template <typename F, typename... Args>
-	auto invoke_impl(F&& func, Args&&... args) -> decltype(eastl::forward<F>(func)(eastl::forward<Args>(args)...))
+	EA_CONSTEXPR auto invoke_impl(F&& func, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(eastl::forward<F>(func)(eastl::forward<Args>(args)...)))
+		-> decltype(eastl::forward<F>(func)(eastl::forward<Args>(args)...))
	{
		return eastl::forward<F>(func)(eastl::forward<Args>(args)...);
	}

	template <typename R, typename C, typename T, typename... Args>
-	auto invoke_impl(R C::*func, T&& obj, Args&&... args) -> decltype(((*eastl::forward<T>(obj)).*func)(eastl::forward<Args>(args)...))
+	EA_CONSTEXPR auto invoke_impl(R C::*func, T&& obj, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(((*eastl::forward<T>(obj)).*func)(eastl::forward<Args>(args)...)))
+		-> decltype(((*eastl::forward<T>(obj)).*func)(eastl::forward<Args>(args)...))
	{
		return ((*eastl::forward<T>(obj)).*func)(eastl::forward<Args>(args)...);
	}

	template <typename M, typename C, typename T>
-	auto invoke_impl(M C::*member, T&& obj) ->
-		typename enable_if<
-			is_base_of<C, decay_t<T>>::value,
-			decltype(obj.*member)
+	EA_CONSTEXPR auto invoke_impl(M C::*member, T&& obj) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(eastl::forward<T>(obj).*member))
+		-> typename enable_if<
+			is_base_of<C, decay_t<T>>::value,
+			decltype(eastl::forward<T>(obj).*member)
		>::type
	{
-		return obj.*member;
+		return eastl::forward<T>(obj).*member;
	}

	template <typename M, typename C, typename T>
-	auto invoke_impl(M C::*member, T&& obj) -> decltype((*eastl::forward<T>(obj)).*member)
+	EA_CONSTEXPR auto invoke_impl(M C::*member, T&& obj) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR((*eastl::forward<T>(obj)).*member))
+		-> decltype((*eastl::forward<T>(obj)).*member)
	{
		return (*eastl::forward<T>(obj)).*member;
	}

	template <typename F, typename... Args>
-	inline decltype(auto) invoke(F&& func, Args&&... args)
+	EA_CONSTEXPR decltype(auto) invoke(F&& func, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(invoke_impl(eastl::forward<F>(func), eastl::forward<Args>(args)...)))
	{
		return invoke_impl(eastl::forward<F>(func), eastl::forward<Args>(args)...);
	}
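A short usage sketch of the overload set above — each call resolves to a different invoke_impl (assuming eastl::invoke is reachable through <EASTL/functional.h>, as the other functional utilities are):

    #include <EASTL/functional.h>

    struct Point
    {
    	int x = 1;
    	int twice() const { return x * 2; }
    };

    int invoke_demo()
    {
    	Point p;
    	int a = eastl::invoke(&Point::twice, p);                // member function + object
    	int b = eastl::invoke(&Point::x, &p);                   // member data + pointer
    	int c = eastl::invoke([](int v) { return v + 1; }, a);  // plain callable
    	return a + b + c;
    }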
@@ -86,9 +91,9 @@ namespace eastl
};
	template <typename F, typename... Args>
-	struct invoke_result_impl<F, void_t<decltype(invoke_impl(eastl::declval<decay_t<F>>(), eastl::declval<Args>()...))>, Args...>
+	struct invoke_result_impl<F, void_t<decltype(invoke_impl(eastl::declval<F>(), eastl::declval<Args>()...))>, Args...>
	{
-		typedef decltype(invoke_impl(eastl::declval<decay_t<F>>(), eastl::declval<Args>()...)) type;
+		typedef decltype(invoke_impl(eastl::declval<F>(), eastl::declval<Args>()...)) type;
	};

	template <typename F, typename... Args>
@@ -118,13 +123,40 @@ namespace eastl
	template <typename R, typename F, typename... Args>
	struct is_invocable_r : public is_invocable_r_impl<R, F, void, Args...> {};

-	#if EASTL_VARIABLE_TEMPLATES_ENABLED
-		template <typename F, typename... Args>
-		EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_invocable_v = is_invocable<F, Args...>::value;
+	template <typename F, typename... Args>
+	EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_invocable_v = is_invocable<F, Args...>::value;

-		template <typename R, typename F, typename... Args>
-		EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_invocable_r_v = is_invocable_r<R, F, Args...>::value;
-	#endif
+	template <typename R, typename F, typename... Args>
+	EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_invocable_r_v = is_invocable_r<R, F, Args...>::value;
+
+	template <typename F, typename = void, typename... Args>
+	struct is_nothrow_invocable_impl : public eastl::false_type {};
+
+	template <typename F, typename... Args>
+	struct is_nothrow_invocable_impl<F, void_t<typename eastl::invoke_result<F, Args...>::type>, Args...>
+		: public eastl::bool_constant<EA_NOEXCEPT_EXPR(eastl::invoke(eastl::declval<F>(), eastl::declval<Args>()...))> {};
+
+	template <typename F, typename... Args>
+	struct is_nothrow_invocable : public is_nothrow_invocable_impl<F, void, Args...> {};
+
+	template <typename R, typename F, typename = void, typename... Args>
+	struct is_nothrow_invocable_r_impl : public eastl::false_type {};
+
+	template <typename R, typename F, typename... Args>
+	struct is_nothrow_invocable_r_impl<R, F, void_t<typename eastl::invoke_result<F, Args...>::type>, Args...>
+	{
+		static EA_CONSTEXPR_OR_CONST bool value = eastl::is_convertible<typename eastl::invoke_result<F, Args...>::type, R>::value
+			&& eastl::is_nothrow_invocable<F, Args...>::value;
+	};
+
+	template <typename R, typename F, typename... Args>
+	struct is_nothrow_invocable_r : public is_nothrow_invocable_r_impl<R, F, void, Args...> {};
+
+	template <typename F, typename... Args>
+	EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_nothrow_invocable_v = is_nothrow_invocable<F, Args...>::value;
+
+	template <typename R, typename F, typename... Args>
+	EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_nothrow_invocable_r_v = is_nothrow_invocable_r<R, F, Args...>::value;
/// allocator_arg_t
///
@@ -144,9 +176,7 @@ namespace eastl
/// such as tuple, function, promise, and packaged_task.
/// http://en.cppreference.com/w/cpp/memory/allocator_arg
///
- #if !defined(EA_COMPILER_NO_CONSTEXPR)
- EA_CONSTEXPR allocator_arg_t allocator_arg = allocator_arg_t();
- #endif
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR allocator_arg_t allocator_arg = allocator_arg_t();
template
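A usage sketch for the is_nothrow_invocable family added above (the members are only declared; traits never need their definitions):

    struct Noisy
    {
    	void f() noexcept;
    	void g();
    };

    static_assert( eastl::is_nothrow_invocable_v<decltype(&Noisy::f), Noisy&>, "noexcept member function");
    static_assert(!eastl::is_nothrow_invocable_v<decltype(&Noisy::g), Noisy&>, "potentially throwing member");
    static_assert( eastl::is_nothrow_invocable_r_v<void, decltype(&Noisy::f), Noisy&>, "result converts to void");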
@@ -248,7 +278,7 @@ namespace eastl
	template <typename T>
	reference_wrapper<T> ref(T& t) EA_NOEXCEPT
	{
-		return eastl::reference_wrapper<T>(t);
+		return eastl::reference_wrapper<T>(t);
	}

	template <typename T>
@@ -307,16 +337,16 @@ namespace eastl
// These have to come after reference_wrapper is defined, but reference_wrapper needs to have a
// definition of invoke, so these specializations need to come after everything else has been defined.
	template <typename R, typename C, typename T, typename... Args>
-	auto invoke_impl(R (C::*func)(Args...), T&& obj, Args&&... args) ->
-		typename enable_if<is_reference_wrapper<typename remove_reference<T>::type>::value,
+	EA_CONSTEXPR auto invoke_impl(R C::*func, T&& obj, Args&&... args) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR((obj.get().*func)(eastl::forward<Args>(args)...)))
+		-> typename enable_if<is_reference_wrapper<eastl::remove_reference_t<T>>::value,
		                   decltype((obj.get().*func)(eastl::forward<Args>(args)...))>::type
	{
		return (obj.get().*func)(eastl::forward<Args>(args)...);
	}

	template <typename M, typename C, typename T>
-	auto invoke_impl(M(C::*member), T&& obj) ->
-		typename enable_if<is_reference_wrapper<typename remove_reference<T>::type>::value,
+	EA_CONSTEXPR auto invoke_impl(M C::*member, T&& obj) EA_NOEXCEPT_IF(EA_NOEXCEPT_EXPR(obj.get().*member))
+		-> typename enable_if<is_reference_wrapper<eastl::remove_reference_t<T>>::value,
		                   decltype(obj.get().*member)>::type
	{
		return obj.get().*member;
@@ -386,4 +416,4 @@ namespace eastl
} // namespace eastl
-#endif // Header include guard
+#endif // EASTL_INTERNAL_FUNCTIONAL_BASE_H
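The reference_wrapper specializations near the end of this file exist so that invoking a member pointer through eastl::ref unwraps to the referenced object. A minimal sketch (invoke_ref_demo is hypothetical):

    #include <EASTL/functional.h>

    struct Counter
    {
    	int n = 0;
    	int bump() { return ++n; }
    };

    int invoke_ref_demo()
    {
    	Counter c;
    	eastl::reference_wrapper<Counter> r = eastl::ref(c);
    	return eastl::invoke(&Counter::bump, r); // routes through the obj.get().*func overload
    }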
diff --git a/include/EASTL/internal/hashtable.h b/include/EASTL/internal/hashtable.h
index bb6d27eb..a9347b18 100644
--- a/include/EASTL/internal/hashtable.h
+++ b/include/EASTL/internal/hashtable.h
@@ -1572,7 +1572,7 @@ namespace eastl
typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNodeFromKey(const key_type& key)
{
- node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0);
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
#if EASTL_EXCEPTIONS_ENABLED
@@ -1598,7 +1598,7 @@ namespace eastl
typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNodeFromKey(key_type&& key)
{
- node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0);
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
#if EASTL_EXCEPTIONS_ENABLED
@@ -2105,7 +2105,7 @@ namespace eastl
typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNode(Args&&... args)
{
- node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0);
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
#if EASTL_EXCEPTIONS_ENABLED
@@ -2283,7 +2283,7 @@ namespace eastl
typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNode(value_type&& value)
{
- node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0);
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
#if EASTL_EXCEPTIONS_ENABLED
@@ -2453,7 +2453,7 @@ namespace eastl
typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNode(const value_type& value)
{
- node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0);
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
#if EASTL_EXCEPTIONS_ENABLED
@@ -2480,7 +2480,7 @@ namespace eastl
hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::allocate_uninitialized_node()
{
// We don't wrap this in try/catch because users of this function are expected to do that themselves as needed.
- node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0);
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
// Leave pNode->mValue uninitialized.
pNode->mpNext = NULL;
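The repeated change above allocates nodes with the node's own alignment rather than the element's; a node adds bookkeeping (next pointer, optionally a cached hash), so its alignment can be stricter than the value's. A reduced illustration of the hazard (NodeSketch is hypothetical; EASTL's real node layout differs):

    struct alignas(16) WideHash { unsigned char bytes[16]; };

    template <typename Value>
    struct NodeSketch
    {
    	Value       mValue;
    	NodeSketch* mpNext;
    	WideHash    mCachedHash; // raises alignof(NodeSketch) to 16
    };

    static_assert(alignof(NodeSketch<char>) > alignof(char),
                  "allocating with the value's alignment would under-align the node");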
diff --git a/include/EASTL/internal/integer_sequence.h b/include/EASTL/internal/integer_sequence.h
index 88cf1b1b..2a5539dd 100644
--- a/include/EASTL/internal/integer_sequence.h
+++ b/include/EASTL/internal/integer_sequence.h
@@ -67,6 +67,36 @@ using make_integer_sequence = typename make_integer_sequence_impl<T, N>::type;
template <typename... Ts>
using index_sequence_for = make_index_sequence<sizeof...(Ts)>;
+namespace internal
+{
+
+template