path: root/include/EASTL/internal
author     Toni Uhlig <matzeton@googlemail.com>    2021-04-08 16:43:58 +0200
committer  Toni Uhlig <matzeton@googlemail.com>    2021-04-08 16:43:58 +0200
commit     e59cf7b09e7388d369e8d2bf73501cde79c28708 (patch)
tree       6099307032bb86f4a969721f9ac447d3d1be67d4 /include/EASTL/internal
Squashed 'EASTL/' content from commit fad5471
git-subtree-dir: EASTL
git-subtree-split: fad54717f8e4ebb13b20095da7efd07a53af0f10
Diffstat (limited to 'include/EASTL/internal')
-rw-r--r-- include/EASTL/internal/atomic/arch/arch.h | 65
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_add_fetch.h | 173
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_and_fetch.h | 173
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h | 430
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h | 430
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_compiler_barrier.h | 19
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_cpu_pause.h | 25
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_exchange.h | 173
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_fetch_add.h | 173
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_fetch_and.h | 173
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_fetch_or.h | 173
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_fetch_sub.h | 173
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_fetch_xor.h | 173
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_load.h | 125
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_memory_barrier.h | 47
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_or_fetch.h | 173
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_signal_fence.h | 21
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_store.h | 113
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_sub_fetch.h | 173
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_thread_fence.h | 49
-rw-r--r-- include/EASTL/internal/atomic/arch/arch_xor_fetch.h | 173
-rw-r--r-- include/EASTL/internal/atomic/arch/arm/arch_arm.h | 89
-rw-r--r-- include/EASTL/internal/atomic/arch/arm/arch_arm_load.h | 156
-rw-r--r-- include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h | 97
-rw-r--r-- include/EASTL/internal/atomic/arch/arm/arch_arm_store.h | 142
-rw-r--r-- include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h | 37
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86.h | 167
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h | 96
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h | 96
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h | 69
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h | 52
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h | 91
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h | 90
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h | 90
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h | 90
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h | 90
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h | 90
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_load.h | 168
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h | 104
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h | 96
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_store.h | 171
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h | 96
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h | 42
-rw-r--r-- include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h | 96
-rw-r--r-- include/EASTL/internal/atomic/atomic.h | 252
-rw-r--r-- include/EASTL/internal/atomic/atomic_asserts.h | 75
-rw-r--r-- include/EASTL/internal/atomic/atomic_base_width.h | 346
-rw-r--r-- include/EASTL/internal/atomic/atomic_casts.h | 190
-rw-r--r-- include/EASTL/internal/atomic/atomic_flag.h | 170
-rw-r--r-- include/EASTL/internal/atomic/atomic_flag_standalone.h | 69
-rw-r--r-- include/EASTL/internal/atomic/atomic_integral.h | 343
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros.h | 67
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros.h | 145
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h | 98
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h | 98
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h | 65
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h | 245
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h | 245
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h | 30
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h | 22
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h | 98
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h | 98
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h | 98
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h | 98
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h | 98
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h | 98
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h | 75
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h | 38
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h | 98
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h | 34
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h | 68
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h | 98
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h | 34
-rw-r--r-- include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h | 98
-rw-r--r-- include/EASTL/internal/atomic/atomic_memory_order.h | 44
-rw-r--r-- include/EASTL/internal/atomic/atomic_pointer.h | 281
-rw-r--r-- include/EASTL/internal/atomic/atomic_pop_compiler_options.h | 11
-rw-r--r-- include/EASTL/internal/atomic/atomic_push_compiler_options.h | 17
-rw-r--r-- include/EASTL/internal/atomic/atomic_size_aligned.h | 197
-rw-r--r-- include/EASTL/internal/atomic/atomic_standalone.h | 470
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler.h | 120
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_add_fetch.h | 173
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_and_fetch.h | 173
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_barrier.h | 36
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h | 430
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h | 430
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h | 32
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_exchange.h | 173
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_fetch_add.h | 173
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_fetch_and.h | 173
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_fetch_or.h | 173
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h | 173
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h | 173
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_load.h | 139
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h | 47
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_or_fetch.h | 173
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_signal_fence.h | 49
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_store.h | 113
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h | 173
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_thread_fence.h | 49
-rw-r--r-- include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h | 173
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h | 154
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h | 30
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h | 182
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h | 182
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h | 31
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h | 90
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h | 38
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h | 89
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h | 38
-rw-r--r-- include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h | 260
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h | 104
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h | 121
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h | 31
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h | 195
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h | 162
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h | 27
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h | 125
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h | 101
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h | 104
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h | 118
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h | 121
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h | 34
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h | 107
-rw-r--r-- include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h | 121
-rw-r--r-- include/EASTL/internal/char_traits.h | 464
-rw-r--r-- include/EASTL/internal/config.h | 1877
-rw-r--r-- include/EASTL/internal/copy_help.h | 215
-rw-r--r-- include/EASTL/internal/enable_shared.h | 83
-rw-r--r-- include/EASTL/internal/fill_help.h | 484
-rw-r--r-- include/EASTL/internal/fixed_pool.h | 1631
-rw-r--r-- include/EASTL/internal/function.h | 161
-rw-r--r-- include/EASTL/internal/function_detail.h | 673
-rw-r--r-- include/EASTL/internal/function_help.h | 51
-rw-r--r-- include/EASTL/internal/functional_base.h | 389
-rw-r--r-- include/EASTL/internal/generic_iterator.h | 208
-rw-r--r-- include/EASTL/internal/hashtable.h | 3222
-rw-r--r-- include/EASTL/internal/in_place_t.h | 82
-rw-r--r-- include/EASTL/internal/integer_sequence.h | 74
-rw-r--r-- include/EASTL/internal/intrusive_hashtable.h | 989
-rw-r--r-- include/EASTL/internal/mem_fn.h | 304
-rw-r--r-- include/EASTL/internal/memory_base.h | 37
-rw-r--r-- include/EASTL/internal/move_help.h | 162
-rw-r--r-- include/EASTL/internal/pair_fwd_decls.h | 16
-rw-r--r-- include/EASTL/internal/piecewise_construct_t.h | 46
-rw-r--r-- include/EASTL/internal/red_black_tree.h | 2400
-rw-r--r-- include/EASTL/internal/smart_ptr.h | 264
-rw-r--r-- include/EASTL/internal/thread_support.h | 244
-rw-r--r-- include/EASTL/internal/tuple_fwd_decls.h | 56
-rw-r--r-- include/EASTL/internal/type_compound.h | 800
-rw-r--r-- include/EASTL/internal/type_fundamental.h | 289
-rw-r--r-- include/EASTL/internal/type_pod.h | 1945
-rw-r--r-- include/EASTL/internal/type_properties.h | 380
-rw-r--r-- include/EASTL/internal/type_transformations.h | 606
167 files changed, 35752 insertions, 0 deletions
diff --git a/include/EASTL/internal/atomic/arch/arch.h b/include/EASTL/internal/atomic/arch/arch.h
new file mode 100644
index 0000000..4924a59
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch.h
@@ -0,0 +1,65 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Include the architecture specific implementations
+//
+#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)
+
+ #include "x86/arch_x86.h"
+
+#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64)
+
+ #include "arm/arch_arm.h"
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "arch_fetch_add.h"
+#include "arch_fetch_sub.h"
+
+#include "arch_fetch_and.h"
+#include "arch_fetch_xor.h"
+#include "arch_fetch_or.h"
+
+#include "arch_add_fetch.h"
+#include "arch_sub_fetch.h"
+
+#include "arch_and_fetch.h"
+#include "arch_xor_fetch.h"
+#include "arch_or_fetch.h"
+
+#include "arch_exchange.h"
+
+#include "arch_cmpxchg_weak.h"
+#include "arch_cmpxchg_strong.h"
+
+#include "arch_load.h"
+#include "arch_store.h"
+
+#include "arch_compiler_barrier.h"
+
+#include "arch_cpu_pause.h"
+
+#include "arch_memory_barrier.h"
+
+#include "arch_signal_fence.h"
+
+#include "arch_thread_fence.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_H */
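arch.h above is purely a dispatch header: it first pulls in the processor-specific header (x86/ or arm/), which may define EASTL_ARCH_ATOMIC_* operation macros, and then includes the per-operation headers, which translate the presence or absence of each macro into an *_AVAILABLE flag. Together with the compiler/ and atomic_macros/ directories listed in the diffstat, this suggests a layered selection scheme in which an architecture-specific implementation is preferred when available and a compiler-intrinsic one is used otherwise. A simplified sketch of that selection idiom follows; the TOY_* names are invented and the __atomic_add_fetch builtin is only a stand-in, not the real EASTL plumbing.

// Invented names throughout; this only illustrates the "prefer arch, else
// compiler" preprocessor idiom suggested by the directory layout.
#include <cstdint>
#include <cstdio>

#define TOY_ARCH_ADD_FETCH_RELAXED_32_AVAILABLE 0      // e.g. no arch override on this target
#define TOY_COMPILER_ADD_FETCH_RELAXED_32_AVAILABLE 1  // compiler intrinsic is available

#if TOY_ARCH_ADD_FETCH_RELAXED_32_AVAILABLE
	#define TOY_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \
		TOY_ARCH_ADD_FETCH_RELAXED_32(type, ret, ptr, val)
#elif TOY_COMPILER_ADD_FETCH_RELAXED_32_AVAILABLE
	#define TOY_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \
		{ (ret) = __atomic_add_fetch((ptr), (val), __ATOMIC_RELAXED); }
#else
	#error "no implementation available for ADD_FETCH_RELAXED_32"
#endif

int main()
{
	int32_t x = 1;
	int32_t r = 0;
	TOY_ADD_FETCH_RELAXED_32(int32_t, r, &x, 2); // falls through to the compiler branch here
	std::printf("%d\n", r); // prints 3
	return 0;
}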
diff --git a/include/EASTL/internal/atomic/arch/arch_add_fetch.h b/include/EASTL/internal/atomic/arch/arch_add_fetch.h
new file mode 100644
index 0000000..65771f8
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_add_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ADD_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ADD_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ADD_FETCH_H */
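The wall of #if defined(...) checks in arch_add_fetch.h implements a feature-detection convention: for every memory order and operand width, an *_AVAILABLE macro is pinned to 1 or 0 depending on whether an architecture header (included via arch.h) actually provided the corresponding operation macro. Below is a minimal, self-contained sketch of that convention for a single order/width combination; the SKETCH_* names are hypothetical and the __atomic_add_fetch builtin stands in for whatever the real x86/ARM headers emit.

#include <cstdint>
#include <cstdio>

// Hypothetical architecture header: provides one add-fetch variant with the
// signature documented above (type, type ret, type * ptr, type val).
#define SKETCH_ARCH_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \
	{ (ret) = __atomic_add_fetch((ptr), (val), __ATOMIC_RELAXED); }

// What arch_add_fetch.h does with it: turn "was the macro defined?" into a 0/1 flag.
#if defined(SKETCH_ARCH_ATOMIC_ADD_FETCH_RELAXED_32)
	#define SKETCH_ARCH_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 1
#else
	#define SKETCH_ARCH_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 0
#endif

int main()
{
	int32_t counter = 41;
	int32_t result = 0;
#if SKETCH_ARCH_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE
	SKETCH_ARCH_ATOMIC_ADD_FETCH_RELAXED_32(int32_t, result, &counter, 1);
#else
	result = counter + 1; // a target without the arch macro would use another layer instead
#endif
	std::printf("%d\n", result); // prints 42
	return 0;
}

The real header simply repeats this derivation for every order (RELAXED, ACQUIRE, RELEASE, ACQ_REL, SEQ_CST) and every width from 8 to 128 bits, which is why the file is almost entirely boilerplate.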
diff --git a/include/EASTL/internal/atomic/arch/arch_and_fetch.h b/include/EASTL/internal/atomic/arch/arch_and_fetch.h
new file mode 100644
index 0000000..df7ba35
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_and_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_AND_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_AND_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_AND_FETCH_H */
diff --git a/include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h b/include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h
new file mode 100644
index 0000000..1005dc3
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_cmpxchg_strong.h
@@ -0,0 +1,430 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_STRONG_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_STRONG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_STRONG_H */
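The second half of arch_cmpxchg_strong.h maps the single-order spellings onto the two-order variants by deriving the failure order from the success order: ACQUIRE becomes ACQUIRE_ACQUIRE, RELEASE becomes RELEASE_RELAXED, ACQ_REL becomes ACQ_REL_ACQUIRE, and SEQ_CST becomes SEQ_CST_SEQ_CST, consistent with the rule that a failed compare-exchange performs no store and so cannot itself be a release operation. The sketch below shows how such a macro is typically consumed in a CAS retry loop, assuming the signature in the comment above (bool ret, type * ptr, type * expected, type desired); the SKETCH_* macros are hypothetical and the __atomic_compare_exchange_n builtin is only a stand-in for the real per-architecture implementations.

#include <cstdint>
#include <cstdio>

// Hypothetical two-order variant, emulated with a GCC/Clang builtin.
#define SKETCH_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \
	{ (ret) = __atomic_compare_exchange_n((ptr), (expected), (desired), false,                  \
	                                      __ATOMIC_RELEASE, __ATOMIC_RELAXED); }

// Single-order spelling aliased onto it, mirroring the mapping above
// (RELEASE on success implies RELAXED on failure).
#define SKETCH_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_32(type, ret, ptr, expected, desired) \
	SKETCH_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired)

int main()
{
	int32_t value = 0;
	int32_t expected = value;
	bool swapped = false;

	// Classic CAS retry loop: on failure, `expected` is reloaded with the current
	// value, so the next iteration retries against fresh data.
	do
	{
		SKETCH_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_32(int32_t, swapped, &value, &expected, expected + 1);
	} while (!swapped);

	std::printf("%d\n", value); // prints 1
	return 0;
}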
diff --git a/include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h b/include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h
new file mode 100644
index 0000000..5ce2638
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_cmpxchg_weak.h
@@ -0,0 +1,430 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_WEAK_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_WEAK_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128_AVAILABLE \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE
+#define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_CMPXCHG_WEAK_H */
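
[Editor's illustration] As a concrete reading of the convention documented above, the following sketch shows a hypothetical GCC/Clang-style definition of one of the dual-order weak compare-exchange macros and a caller using it; the definition and the TryBump helper are assumptions made for this example only, not code from this commit. With such a definition in place before this header is included, the corresponding _AVAILABLE flag becomes 1 and the single-order alias (e.g. EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_32) expands to it.

    #include <cstdint>

    // Hypothetical GCC/Clang-style definition; in EASTL the real definition, if any,
    // comes from an architecture header included before arch_cmpxchg_weak.h.
    #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \
        (ret) = __atomic_compare_exchange_n((ptr), (expected), (desired),                         \
                                            true /* weak */, __ATOMIC_RELAXED, __ATOMIC_RELAXED)

    // Example caller (hypothetical): a weak compare-exchange may fail spuriously,
    // so real code usually retries in a loop.
    bool TryBump(uint32_t* counter)
    {
        bool     success;
        uint32_t expected = *counter;
        EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(uint32_t, success, counter, &expected, expected + 1u);
        return success;
    }
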
diff --git a/include/EASTL/internal/atomic/arch/arch_compiler_barrier.h b/include/EASTL/internal/atomic/arch/arch_compiler_barrier.h
new file mode 100644
index 0000000..0652469
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_compiler_barrier.h
@@ -0,0 +1,19 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_COMPILER_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_COMPILER_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_ARCH_ATOMIC_COMPILER_BARRIER_AVAILABLE 0
+
+#define EASTL_ARCH_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_AVAILABLE 0
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_COMPILER_BARRIER_H */
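
[Editor's illustration] Both flags are hard-coded to 0 here, presumably because a pure compiler barrier is expected to come from the compiler-specific layer rather than the architecture layer. For reference, a typical GCC/Clang-style compiler barrier (illustration only, not part of this header) is a single line:

    // Prevents the compiler from reordering memory accesses across this point;
    // it emits no CPU fence instruction.
    #define HYPOTHETICAL_COMPILER_BARRIER() __asm__ __volatile__("" ::: "memory")
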
diff --git a/include/EASTL/internal/atomic/arch/arch_cpu_pause.h b/include/EASTL/internal/atomic/arch/arch_cpu_pause.h
new file mode 100644
index 0000000..e8c2d1d
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_cpu_pause.h
@@ -0,0 +1,25 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_CPU_PAUSE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_CPU_PAUSE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_PAUSE()
+//
+#if defined(EASTL_ARCH_ATOMIC_CPU_PAUSE)
+ #define EASTL_ARCH_ATOMIC_CPU_PAUSE_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CPU_PAUSE_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_CPU_PAUSE_H */
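
[Editor's illustration] To show what the macro is for, here is a sketch assuming a hypothetical x86-style definition; the SpinUntilSet helper is an assumption made for this example, not part of the library.

    #include <atomic>

    // Hypothetical x86-style definition; in EASTL the real macro, when present,
    // is provided by the architecture headers and this file only reports it.
    #define EASTL_ARCH_ATOMIC_CPU_PAUSE() __asm__ __volatile__("pause")

    void SpinUntilSet(const std::atomic<int>& flag)
    {
        while (flag.load(std::memory_order_acquire) == 0)
        {
            EASTL_ARCH_ATOMIC_CPU_PAUSE(); // hint to the CPU that this is a spin-wait loop
        }
    }
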
diff --git a/include/EASTL/internal/atomic/arch/arch_exchange.h b/include/EASTL/internal/atomic/arch/arch_exchange.h
new file mode 100644
index 0000000..7600318
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_exchange.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_EXCHANGE_H */
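
[Editor's illustration] The signature comment at the top of this header is easiest to read with an example. The definition below is a hypothetical GCC/Clang-style one used only to show the (type, ret, ptr, val) out-parameter convention; ret receives the previous value.

    #include <cstdint>

    // Hypothetical definition for illustration; not taken from this commit.
    #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \
        (ret) = __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)

    uint32_t ReplaceValue(uint32_t* slot, uint32_t newValue)
    {
        uint32_t previous;
        EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_32(uint32_t, previous, slot, newValue);
        return previous; // value held by *slot before the exchange
    }
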
diff --git a/include/EASTL/internal/atomic/arch/arch_fetch_add.h b/include/EASTL/internal/atomic/arch/arch_fetch_add.h
new file mode 100644
index 0000000..71907f7
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_fetch_add.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_ADD_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_ADD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_ADD_H */
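
[Editor's illustration] A sketch of how a higher layer might key off the availability flag defined above, assuming this header has already been included; MY_FETCH_ADD_RELAXED_32 and the builtin fallback are assumptions for the example, not EASTL's actual selection logic.

    #include <cstdint>

    // Prefer the architecture macro when the header reports it as available,
    // otherwise fall back to a GCC/Clang builtin (hypothetical fallback).
    #if EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE
        #define MY_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \
            EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val)
    #else
        #define MY_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \
            (ret) = __atomic_fetch_add((ptr), (val), __ATOMIC_RELAXED)
    #endif

    uint32_t BumpAndGetOld(uint32_t* counter)
    {
        uint32_t old;
        MY_FETCH_ADD_RELAXED_32(uint32_t, old, counter, 1u);
        return old; // fetch_add returns the value before the addition
    }
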
diff --git a/include/EASTL/internal/atomic/arch/arch_fetch_and.h b/include/EASTL/internal/atomic/arch/arch_fetch_and.h
new file mode 100644
index 0000000..f2b39a4
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_fetch_and.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_AND_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_AND_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_AND_H */
diff --git a/include/EASTL/internal/atomic/arch/arch_fetch_or.h b/include/EASTL/internal/atomic/arch/arch_fetch_or.h
new file mode 100644
index 0000000..dd6dd0d
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_fetch_or.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_OR_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_OR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_OR_H */
diff --git a/include/EASTL/internal/atomic/arch/arch_fetch_sub.h b/include/EASTL/internal/atomic/arch/arch_fetch_sub.h
new file mode 100644
index 0000000..ea63db7
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_fetch_sub.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_SUB_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_SUB_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_SUB_H */
diff --git a/include/EASTL/internal/atomic/arch/arch_fetch_xor.h b/include/EASTL/internal/atomic/arch/arch_fetch_xor.h
new file mode 100644
index 0000000..b41ad2d
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_fetch_xor.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_FETCH_XOR_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_FETCH_XOR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_FETCH_XOR_H */
diff --git a/include/EASTL/internal/atomic/arch/arch_load.h b/include/EASTL/internal/atomic/arch/arch_load.h
new file mode 100644
index 0000000..eea7cf4
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_load.h
@@ -0,0 +1,125 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
+#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_32)
+ #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64)
+ #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_LOAD_H */
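
[Editor's illustration] The READ_DEPENDS orderings are the one thing unique to this header: they cover loads whose ordering is carried by a data dependency, such as dereferencing a freshly loaded pointer. A sketch with a hypothetical definition; a plain load suffices on CPUs that preserve data-dependency ordering, which covers most modern architectures.

    #include <cstdint>

    struct Node { uint64_t payload; };

    // Hypothetical definition; a real one would also have to keep the compiler
    // from caching or reordering the access, which the volatile cast approximates.
    #define EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64(type, ret, ptr) \
        (ret) = *reinterpret_cast<type volatile*>(ptr)

    uint64_t ReadThroughPublishedPointer(Node** slot)
    {
        Node* node;
        EASTL_ARCH_ATOMIC_LOAD_READ_DEPENDS_64(Node*, node, slot);
        return node->payload; // the dereference depends on the loaded pointer value
    }
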
diff --git a/include/EASTL/internal/atomic/arch/arch_memory_barrier.h b/include/EASTL/internal/atomic/arch/arch_memory_barrier.h
new file mode 100644
index 0000000..c6cc6bf
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_memory_barrier.h
@@ -0,0 +1,47 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_MEMORY_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_MEMORY_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_MB()
+//
+#if defined(EASTL_ARCH_ATOMIC_CPU_MB)
+ #define EASTL_ARCH_ATOMIC_CPU_MB_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CPU_MB_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_WMB()
+//
+#if defined(EASTL_ARCH_ATOMIC_CPU_WMB)
+ #define EASTL_ARCH_ATOMIC_CPU_WMB_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CPU_WMB_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_RMB()
+//
+#if defined(EASTL_ARCH_ATOMIC_CPU_RMB)
+ #define EASTL_ARCH_ATOMIC_CPU_RMB_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_CPU_RMB_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_MEMORY_BARRIER_H */
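
[Editor's illustration] For orientation, hypothetical x86-style definitions of the three barriers are shown below; they are illustrations only, since the real ones come from the per-architecture headers. On x86 the write and read barriers can reduce to compiler-only barriers because the hardware already keeps stores ordered with stores and loads ordered with loads.

    // Illustration only, assuming x86/x64 with a GCC/Clang-style compiler.
    #define EASTL_ARCH_ATOMIC_CPU_MB()  __asm__ __volatile__("mfence" ::: "memory") // full fence
    #define EASTL_ARCH_ATOMIC_CPU_WMB() __asm__ __volatile__("" ::: "memory")       // store-store: compiler barrier suffices
    #define EASTL_ARCH_ATOMIC_CPU_RMB() __asm__ __volatile__("" ::: "memory")       // load-load: compiler barrier suffices
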
diff --git a/include/EASTL/internal/atomic/arch/arch_or_fetch.h b/include/EASTL/internal/atomic/arch/arch_or_fetch.h
new file mode 100644
index 0000000..110326b
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_or_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_OR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_OR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_OR_FETCH_H */
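
[Editor's illustration] The OR_FETCH family differs from the FETCH_OR family earlier in this diff only in what lands in ret: the value after the operation rather than the value before it. A sketch with hypothetical GCC/Clang-style definitions:

    #include <cstdint>

    // Hypothetical definitions, shown only to illustrate the naming convention.
    #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32(type, ret, ptr, val) \
        (ret) = __atomic_fetch_or((ptr), (val), __ATOMIC_RELAXED) // ret = old value
    #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_32(type, ret, ptr, val) \
        (ret) = __atomic_or_fetch((ptr), (val), __ATOMIC_RELAXED) // ret = new value

    bool SetFlagWasAlreadySet(uint32_t* flags, uint32_t bit)
    {
        uint32_t oldValue;
        EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_32(uint32_t, oldValue, flags, bit);
        return (oldValue & bit) != 0; // needs the pre-operation value, hence FETCH_OR
    }
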
diff --git a/include/EASTL/internal/atomic/arch/arch_signal_fence.h b/include/EASTL/internal/atomic/arch/arch_signal_fence.h
new file mode 100644
index 0000000..65b64fc
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_signal_fence.h
@@ -0,0 +1,21 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_SIGNAL_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_SIGNAL_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_RELAXED_AVAILABLE 0
+#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_ACQUIRE_AVAILABLE 0
+#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_RELEASE_AVAILABLE 0
+#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_ACQ_REL_AVAILABLE 0
+#define EASTL_ARCH_ATOMIC_SIGNAL_FENCE_SEQ_CST_AVAILABLE 0
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_SIGNAL_FENCE_H */
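
[Editor's illustration] As with the compiler-barrier header, every flag here is hard-coded to 0, leaving signal fences to other layers. For reference, the standard-library form (illustration only) is:

    #include <atomic>

    // Orders memory accesses with respect to a signal handler running on the
    // same thread; no CPU fence instruction is emitted.
    void BeforeHandingOffToSignalHandler()
    {
        std::atomic_signal_fence(std::memory_order_release);
    }
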
diff --git a/include/EASTL/internal/atomic/arch/arch_store.h b/include/EASTL/internal/atomic/arch/arch_store.h
new file mode 100644
index 0000000..9a4112c
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_store.h
@@ -0,0 +1,113 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_STORE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_STORE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_STORE_*_N(type, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_STORE_H */
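
The availability flags above all follow the same detection-and-fallback pattern: a flag is 1 only when the corresponding arch-level macro was defined by an architecture header, which lets higher layers pick the arch implementation and otherwise fall back to a compiler-based one. A minimal sketch of that idea with made-up FOO-style names (hypothetical, not the real EASTL dispatch macros):

    /* Hypothetical illustration of the detection/fallback pattern. */
    /* ARCH_STORE_RELAXED_32 would be supplied by an architecture header */
    /* when a better-than-generic implementation exists. */
    #if defined(ARCH_STORE_RELAXED_32)
        #define ARCH_STORE_RELAXED_32_AVAILABLE 1
    #else
        #define ARCH_STORE_RELAXED_32_AVAILABLE 0
    #endif

    #if ARCH_STORE_RELAXED_32_AVAILABLE
        #define STORE_RELAXED_32(ptr, val) ARCH_STORE_RELAXED_32((ptr), (val))
    #else
        #define STORE_RELAXED_32(ptr, val) COMPILER_STORE_RELAXED_32((ptr), (val))
    #endif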
diff --git a/include/EASTL/internal/atomic/arch/arch_sub_fetch.h b/include/EASTL/internal/atomic/arch/arch_sub_fetch.h
new file mode 100644
index 0000000..20241b1
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_sub_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_SUB_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_SUB_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_SUB_FETCH_H */
diff --git a/include/EASTL/internal/atomic/arch/arch_thread_fence.h b/include/EASTL/internal/atomic/arch/arch_thread_fence.h
new file mode 100644
index 0000000..676fbf1
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_thread_fence.h
@@ -0,0 +1,49 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_THREAD_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_THREAD_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_THREAD_FENCE_*()
+//
+#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED)
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE)
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE)
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL)
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST)
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_THREAD_FENCE_H */
diff --git a/include/EASTL/internal/atomic/arch/arch_xor_fetch.h b/include/EASTL/internal/atomic/arch/arch_xor_fetch.h
new file mode 100644
index 0000000..63548c2
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arch_xor_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_XOR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_XOR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_8)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_8)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_8)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_8)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_8)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_16)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_16)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_16)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_16)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_16)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_32)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_32)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_32)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_32)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_32)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128)
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_XOR_FETCH_H */
diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm.h b/include/EASTL/internal/atomic/arch/arm/arch_arm.h
new file mode 100644
index 0000000..cc2ce52
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arm/arch_arm.h
@@ -0,0 +1,89 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/**
+ * NOTE: We use this mapping
+ *
+ * ARMv7 Mapping 'trailing sync;':
+ *
+ * Load Relaxed : ldr
+ * Load Acquire : ldr; dmb ish
+ * Load Seq_Cst : ldr; dmb ish
+ *
+ * Store Relaxed : str
+ * Store Release : dmb ish; str
+ * Store Seq_Cst : dmb ish; str; dmb ish
+ *
+ * Relaxed Fence :
+ * Acquire Fence : dmb ish
+ * Release Fence : dmb ish
+ * Acq_Rel Fence : dmb ish
+ * Seq_Cst Fence : dmb ish
+ */
+
+/**
+ * ARMv7 Mapping 'leading sync;':
+ *
+ * Load Relaxed : ldr
+ * Load Acquire : ldr; dmb ish
+ * Load Seq_Cst : dmb ish; ldr; dmb ish
+ *
+ * Store Relaxed : str
+ * Store Release : dmb ish; str
+ * Store Seq_Cst : dmb ish; str
+ *
+ * Relaxed Fence :
+ * Acquire Fence : dmb ish
+ * Release Fence : dmb ish
+ * Acq_Rel Fence : dmb ish
+ * Seq_Cst Fence : dmb ish
+ */
+
+/**
+ * NOTE:
+ *
+ * On ARM32/64 we use the 'trailing sync;' convention with the stricter load acquire that uses
+ * a dmb instead of a control dependency + isb; one reason is to ensure the IRIW litmus test is
+ * satisfied. See EASTL/atomic.h for further explanation and a deep-dive.
+ *
+ * For ARMv8 we could move to the proper store-release and load-acquire instructions, the RCsc variant.
+ * All ARMv7 approaches also work on ARMv8, and this code path is only used under MSVC, which is not
+ * exercised heavily. Most of the ARM code will end up going through Clang or GCC since Microsoft ARM
+ * devices aren't that abundant.
+ */
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(EA_COMPILER_MSVC)
+
+ #if EA_PLATFORM_PTR_SIZE == 8
+ #define EASTL_ARCH_ATOMIC_HAS_128BIT
+ #endif
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "arch_arm_load.h"
+#include "arch_arm_store.h"
+
+#include "arch_arm_memory_barrier.h"
+
+#include "arch_arm_thread_fence.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_H */
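
As a concrete illustration of the 'trailing sync;' convention documented above, a seq_cst store brackets the plain str with dmb ish on both sides, while an acquire load is a plain ldr followed by dmb ish. A minimal GCC/Clang-style sketch for a 32-bit location (simplified: the real macros type-pun through integral accesses and emit the barrier through EASTL_ATOMIC_CPU_MB()):

    inline void store_seq_cst_32(volatile int* p, int v)
    {
        __asm__ __volatile__("dmb ish" ::: "memory"); /* order prior accesses before the store */
        *p = v;                                       /* the plain 'str' */
        __asm__ __volatile__("dmb ish" ::: "memory"); /* trailing sync: order the store before later accesses */
    }

    inline int load_acquire_32(volatile int* p)
    {
        int v = *p;                                   /* the plain 'ldr' */
        __asm__ __volatile__("dmb ish" ::: "memory"); /* trailing barrier upgrades the load to acquire */
        return v;
    }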
diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h b/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h
new file mode 100644
index 0000000..e3b79b8
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arm/arch_arm_load.h
@@ -0,0 +1,156 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
+#if defined(EA_COMPILER_MSVC)
+
+
+ /**
+ * NOTE:
+ *
+ * Even 8-byte aligned 64-bit memory accesses on ARM32 are not
+ * guaranteed to be atomic on all ARM32 cpus; they are only guaranteed
+ * on cpus with the LPAE extension. We therefore use the
+ * ldrexd instruction to ensure no shearing is observed
+ * on any ARM32 processor.
+ */
+ #if defined(EA_PROCESSOR_ARM32)
+
+ #define EASTL_ARCH_ATOMIC_ARM32_LDREXD(ret, ptr) \
+ ret = __ldrexd((ptr))
+
+ #endif
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_LOAD_N(integralType, bits, type, ret, ptr) \
+ { \
+ integralType retIntegral; \
+ retIntegral = EA_PREPROCESSOR_JOIN(__iso_volatile_load, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr))); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int8, 8, type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int16, 16, type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int32, 32, type, ret, ptr)
+
+
+ #if defined(EA_PROCESSOR_ARM32)
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr) \
+ { \
+ __int64 loadRet64; \
+ EASTL_ARCH_ATOMIC_ARM32_LDREXD(loadRet64, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__int64, (ptr))); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, loadRet64); \
+ }
+
+ #else
+
+ #define EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_N(__int64, 64, type, ret, ptr)
+
+ #endif
+
+
+ /**
+ * NOTE:
+ *
+ * The ARM documentation states the following:
+ * A 64-bit pair requires the address to be quadword aligned and is single-copy atomic for each doubleword at doubleword granularity
+ *
+ * Thus we must ensure the store succeeds in order for the load to be observed as atomic,
+ * and so we must use the full cmpxchg to perform a proper atomic load.
+ */
+ #define EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, MemoryOrder) \
+ { \
+ bool cmpxchgRetBool; \
+ ret = *(ptr); \
+ do \
+ { \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRetBool, \
+ ptr, &(ret), ret); \
+ } while (!cmpxchgRetBool); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, RELAXED)
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, ACQUIRE)
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_8(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_16(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_32(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_64(type, ret, ptr); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_ARM_LOAD_128(type, ret, ptr, SEQ_CST)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_LOAD_H */
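
The 128-bit load above uses the general technique of building an atomic load out of a compare-exchange that writes back the value it observed, since only the exchange instruction provides single-copy atomicity at that width. A rough standalone sketch of the same loop with the GCC/Clang __atomic builtins; this is an illustration only (the header's path is MSVC-specific and goes through the EASTL cmpxchg macros), and it assumes a lock-free 16-byte compare-exchange on the target:

    inline __int128 load_128_relaxed(__int128* p)
    {
        __int128 observed = *p; /* intentionally non-atomic first guess; may be torn */
        while (!__atomic_compare_exchange_n(p, &observed, observed,
                                            /*weak=*/false,
                                            __ATOMIC_RELAXED, __ATOMIC_RELAXED))
        {
            /* on failure 'observed' is refreshed with the atomically read value; retry with it */
        }
        return observed;
    }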
diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h b/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h
new file mode 100644
index 0000000..c52962e
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arm/arch_arm_memory_barrier.h
@@ -0,0 +1,97 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_MEMORY_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_MEMORY_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_COMPILER_MSVC)
+
+ #if defined(EA_PROCESSOR_ARM32)
+
+ #define EASTL_ARM_DMB_ISH _ARM_BARRIER_ISH
+
+ #define EASTL_ARM_DMB_ISHST _ARM_BARRIER_ISHST
+
+ #define EASTL_ARM_DMB_ISHLD _ARM_BARRIER_ISH
+
+ #elif defined(EA_PROCESSOR_ARM64)
+
+ #define EASTL_ARM_DMB_ISH _ARM64_BARRIER_ISH
+
+ #define EASTL_ARM_DMB_ISHST _ARM64_BARRIER_ISHST
+
+ #define EASTL_ARM_DMB_ISHLD _ARM64_BARRIER_ISHLD
+
+ #endif
+
+
+ /**
+ * NOTE:
+ *
+ * While it makes no sense for a hardware memory barrier not to imply a compiler barrier,
+ * the MSVC docs do not explicitly state that it does, so it is better to be safe than sorry
+ * chasing down hard-to-find bugs caused by the compiler deciding to reorder things.
+ */
+
+ #define EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(option) \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ __dmb(option); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+
+#elif defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)
+
+ #define EASTL_ARM_DMB_ISH ish
+
+ #define EASTL_ARM_DMB_ISHST ishst
+
+ #if defined(EA_PROCESSOR_ARM32)
+
+ #define EASTL_ARM_DMB_ISHLD ish
+
+ #elif defined(EA_PROCESSOR_ARM64)
+
+ #define EASTL_ARM_DMB_ISHLD ishld
+
+ #endif
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(option) \
+ __asm__ __volatile__ ("dmb " EA_STRINGIFY(option) ::: "memory")
+
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_MB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_MB() \
+ EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(EASTL_ARM_DMB_ISH)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_WMB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_WMB() \
+ EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(EASTL_ARM_DMB_ISHST)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_RMB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_RMB() \
+ EASTL_ARCH_ATOMIC_ARM_EMIT_DMB(EASTL_ARM_DMB_ISHLD)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_MEMORY_BARRIER_H */
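
The EMIT_DMB wrapper above deliberately sandwiches the MSVC __dmb intrinsic between compiler barriers so the compiler cannot reorder memory accesses around the hardware barrier. On the GCC/Clang path the "memory" clobber of the asm statement already acts as that compiler barrier, so a rough standalone equivalent of the three barrier flavours (illustrative names, not the EASTL_ARCH_ATOMIC_CPU_* macros themselves) is simply:

    #define CPU_MB()  __asm__ __volatile__("dmb ish"   ::: "memory") /* full barrier */
    #define CPU_WMB() __asm__ __volatile__("dmb ishst" ::: "memory") /* store-store barrier */
    #define CPU_RMB() __asm__ __volatile__("dmb ishld" ::: "memory") /* load barrier; ARMv8 only, ARMv7 falls back to 'dmb ish' */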
diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm_store.h b/include/EASTL/internal/atomic/arch/arm/arch_arm_store.h
new file mode 100644
index 0000000..ab53b9d
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arm/arch_arm_store.h
@@ -0,0 +1,142 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_STORE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_STORE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_STORE_*_N(type, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC)
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_STORE_N(integralType, bits, type, ptr, val) \
+ EA_PREPROCESSOR_JOIN(__iso_volatile_store, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)))
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_N(__int8, 8, type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_N(__int16, 16, type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_N(__int32, 32, type, ptr, val)
+
+
+ #if defined(EA_PROCESSOR_ARM64)
+
+ #define EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_N(__int64, 64, type, ptr, val)
+
+ #endif
+
+
+ #define EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, MemoryOrder) \
+ { \
+ type exchange128; EA_UNUSED(exchange128); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, RELAXED)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, RELEASE)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_8(type, ptr, val); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_16(type, ptr, val); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_32(type, ptr, val); \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_128(type, ptr, val, SEQ_CST)
+
+
+ #if defined(EA_PROCESSOR_ARM32)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64(type, ptr, val) \
+ { \
+ type retExchange64; EA_UNUSED(retExchange64); \
+ EASTL_ATOMIC_EXCHANGE_RELAXED_64(type, retExchange64, ptr, val); \
+ }
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64(type, ptr, val) \
+ { \
+ type retExchange64; EA_UNUSED(retExchange64); \
+ EASTL_ATOMIC_EXCHANGE_RELEASE_64(type, retExchange64, ptr, val); \
+ }
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \
+ { \
+ type retExchange64; EA_UNUSED(retExchange64); \
+ EASTL_ATOMIC_EXCHANGE_SEQ_CST_64(type, retExchange64, ptr, val); \
+ }
+
+
+ #elif defined(EA_PROCESSOR_ARM64)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \
+ EASTL_ATOMIC_CPU_MB(); \
+ EASTL_ARCH_ATOMIC_ARM_STORE_64(type, ptr, val); \
+ EASTL_ATOMIC_CPU_MB()
+
+
+ #endif
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_STORE_H */
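
Where no plain store instruction of the required width exists (64-bit stores on ARM32 under MSVC, and all 128-bit stores), the header above implements the store as an atomic exchange whose returned previous value is simply discarded. A small sketch of that technique using the GCC/Clang builtin rather than the MSVC intrinsics the header actually relies on:

    inline void store_release_64(long long* p, long long v)
    {
        long long previous = __atomic_exchange_n(p, v, __ATOMIC_RELEASE);
        (void)previous; /* the old value is not needed; the exchange is used only for its atomicity */
    }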
diff --git a/include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h b/include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h
new file mode 100644
index 0000000..391c64e
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/arm/arch_arm_thread_fence.h
@@ -0,0 +1,37 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_ARM_THREAD_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_ARM_THREAD_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_THREAD_FENCE_*()
+//
+#if defined(EA_COMPILER_MSVC)
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE() \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE() \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL() \
+ EASTL_ATOMIC_CPU_MB()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST() \
+ EASTL_ATOMIC_CPU_MB()
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_ARM_THREAD_FENCE_H */
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86.h b/include/EASTL/internal/atomic/arch/x86/arch_x86.h
new file mode 100644
index 0000000..5087c13
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86.h
@@ -0,0 +1,167 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/**
+ * x86 && x64 Mappings
+ *
+ * Load Relaxed : MOV
+ * Load Acquire : MOV; COMPILER_BARRIER;
+ * Load Seq_Cst : MOV; COMPILER_BARRIER;
+ *
+ * Store Relaxed : MOV
+ * Store Release : COMPILER_BARRIER; MOV;
+ * Store Seq_Cst : LOCK XCHG, or MOV; MFENCE;
+ *
+ * Relaxed Fence :
+ * Acquire Fence : COMPILER_BARRIER
+ * Release Fence : COMPILER_BARRIER
+ * Acq_Rel Fence : COMPILER_BARRIER
+ * Seq_Cst FENCE : MFENCE
+ */
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(EA_COMPILER_MSVC)
+
+ #if EA_PLATFORM_PTR_SIZE == 8
+ #define EASTL_ARCH_ATOMIC_HAS_128BIT
+ #endif
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+ #define EASTL_ARCH_ATOMIC_HAS_128BIT
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/**
+ * NOTE:
+ *
+ * On 32-bit x86 CPUs, Intel Pentium and newer, AMD K5 and newer,
+ * and any other i586-class x86 CPU support only a 64-bit cmpxchg,
+ * known as cmpxchg8b.
+ *
+ * On this class of cpus we can guarantee that 64-bit loads/stores are
+ * also atomic by using the SSE2 movq, SSE1 movlps, or x87 fild/fstp instructions.
+ *
+ * We support all other atomic operations
+ * on compilers that only provide this 64-bit cmpxchg instruction
+ * by wrapping them around the 64-bit cmpxchg8b instruction.
+ */
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ static_assert(false, "EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED() must be implemented!");
+
+ #define EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET(ret, prevObserved, val)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
+ { \
+ bool cmpxchgRet; \
+ EASTL_ATOMIC_LOAD_RELAXED_64(type, ret, ptr); \
+ do \
+ { \
+ type computedDesired; \
+ PRE_COMPUTE_DESIRED(computedDesired, ret, (val)); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _64)(type, cmpxchgRet, ptr, &(ret), computedDesired); \
+ } while (!cmpxchgRet); \
+ POST_COMPUTE_RET(ret, ret, (val)); \
+ }
+
+
+#endif
+
+
+/**
+ * NOTE:
+ *
+ * 64-bit x64 CPUs support only a 128-bit cmpxchg, known as cmpxchg16b.
+ *
+ * We support all other atomic operations by wrapping them around
+ * the 128-bit cmpxchg16b instruction.
+ *
+ * 128-bit loads are only atomic when performed via the cmpxchg16b instruction.
+ * SSE 128-bit loads are not guaranteed to be atomic even though some CPUs,
+ * such as AMD Ryzen or Intel Sandy Bridge, happen to make them atomic.
+ */
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ static_assert(false, "EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED() must be implemented!");
+
+ #define EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET(ret, prevObserved, val)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
+ { \
+ bool cmpxchgRet; \
+ /* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \
+ /* Either we do not observe *(ptr), in which case the cmpxchg fails and the atomically */ \
+ /* observed value is returned; or the non-atomic load got lucky, the observed value matches */ \
+ /* the value in *(ptr), and the cmpxchg succeeds. Thus we can optimistically start with a non-atomic load. */ \
+ ret = *(ptr); \
+ do \
+ { \
+ type computedDesired; \
+ PRE_COMPUTE_DESIRED(computedDesired, ret, (val)); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), computedDesired); \
+ } while (!cmpxchgRet); \
+ POST_COMPUTE_RET(ret, ret, (val)); \
+ }
+
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "arch_x86_fetch_add.h"
+#include "arch_x86_fetch_sub.h"
+
+#include "arch_x86_fetch_and.h"
+#include "arch_x86_fetch_xor.h"
+#include "arch_x86_fetch_or.h"
+
+#include "arch_x86_add_fetch.h"
+#include "arch_x86_sub_fetch.h"
+
+#include "arch_x86_and_fetch.h"
+#include "arch_x86_xor_fetch.h"
+#include "arch_x86_or_fetch.h"
+
+#include "arch_x86_exchange.h"
+
+#include "arch_x86_cmpxchg_weak.h"
+#include "arch_x86_cmpxchg_strong.h"
+
+#include "arch_x86_memory_barrier.h"
+
+#include "arch_x86_thread_fence.h"
+
+#include "arch_x86_load.h"
+#include "arch_x86_store.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_H */
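
The OP_64_IMPL and OP_128_IMPL helpers above express every read-modify-write as a compare-exchange loop: load an initial guess, compute the desired value from the observed one (PRE_COMPUTE_DESIRED), and retry the CAS until it succeeds; POST_COMPUTE_RET then turns the finally observed value into either fetch_op (return the old value) or op_fetch (return the new value) semantics. A compact sketch of that loop for a 64-bit add, using the __atomic builtins instead of the EASTL macro layer:

    inline long long fetch_add_64(long long* p, long long v)
    {
        long long observed = __atomic_load_n(p, __ATOMIC_RELAXED); /* initial guess */
        long long desired;
        do
        {
            desired = observed + v;                                /* PRE_COMPUTE_DESIRED */
        } while (!__atomic_compare_exchange_n(p, &observed, desired,
                                              /*weak=*/false,
                                              __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
        return observed;                                           /* fetch_add: value before the update */
    }

    inline long long add_fetch_64(long long* p, long long v)
    {
        return fetch_add_64(p, v) + v;                             /* POST_COMPUTE_RET: value after the update */
    }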
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h
new file mode 100644
index 0000000..4534806
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_add_fetch.h
@@ -0,0 +1,96 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_ADD_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_ADD_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) + (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) + (val))
+
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) + (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) + (val))
+
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_ADD_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_ADD_FETCH_H */
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h
new file mode 100644
index 0000000..c38ba41
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_and_fetch.h
@@ -0,0 +1,96 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_AND_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_AND_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) & (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) & (val))
+
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) & (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) & (val))
+
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_AND_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_AND_FETCH_H */
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h
new file mode 100644
index 0000000..e028398
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_strong.h
@@ -0,0 +1,69 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_STRONG_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_STRONG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired) \
+ { \
+ /* Compare RDX:RAX with m128. If equal, set ZF and load RCX:RBX into m128. Else, clear ZF and load m128 into RDX:RAX. */ \
+ __asm__ __volatile__ ("lock; cmpxchg16b %2\n" /* cmpxchg16b sets/clears ZF */ \
+ "sete %3" /* If ZF == 1, set the return value to 1 */ \
+ /* Output Operands */ \
+ : "=a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[0]), "=d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[1]), \
+ "+m"(*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__uint128_t, (ptr)))), \
+ "=rm"((ret)) \
+ /* Input Operands */ \
+ : "b"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))[0]), "c"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(desired)))[1]), \
+ "a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[0]), "d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, (expected)))[1]) \
+ /* Clobbers */ \
+ : "memory", "cc"); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ARCH_ATOMIC_X86_CMPXCHG_STRONG_128_IMPL(type, ret, ptr, expected, desired)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_STRONG_H */
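
The inline assembly above hand-rolls cmpxchg16b: RDX:RAX carries the expected value, RCX:RBX the desired value, and sete captures the ZF result as the boolean return. For experimenting with the same operation outside EASTL, GCC and Clang can usually express it through the __atomic builtins on unsigned __int128; a sketch under the assumption that the target makes the 16-byte compare-exchange lock-free (e.g. x86-64 built with -mcx16, otherwise the call may be routed through libatomic):

    inline bool cmpxchg_strong_128_seq_cst(unsigned __int128* p,
                                           unsigned __int128* expected,
                                           unsigned __int128  desired)
    {
        return __atomic_compare_exchange_n(p, expected, desired,
                                           /*weak=*/false,
                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }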
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h
new file mode 100644
index 0000000..f8b956a
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_cmpxchg_weak.h
@@ -0,0 +1,52 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_WEAK_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_WEAK_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+ #define EASTL_ARCH_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_CMPXCHG_WEAK_H */
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h
new file mode 100644
index 0000000..0f05800
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_exchange.h
@@ -0,0 +1,91 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = (val)
+
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, MemoryOrder) \
+ { \
+ bool cmpxchgRet; \
+ /* This is intentionally a non-atomic 128-bit load which may observe shearing. */ \
+ /* Either we do not observe *(ptr), in which case the cmpxchg fails and the atomically */ \
+ /* observed value is returned; or the non-atomic load got lucky, the observed value matches */ \
+ /* the value in *(ptr), and the cmpxchg succeeds. Thus we can optimistically start with a non-atomic load. */ \
+ ret = *(ptr); \
+ do \
+ { \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), val); \
+ } while (!cmpxchgRet); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, RELAXED)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, ACQUIRE)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, RELEASE)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, ACQ_REL)
+
+ #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, SEQ_CST)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H */
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h
new file mode 100644
index 0000000..d78b333
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_add.h
@@ -0,0 +1,90 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_ADD_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_ADD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) + (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) + (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_ADD_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_ADD_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_ADD_H */
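The fetch_add variants above only supply the small PRE_COMPUTE_DESIRED hook; the heavy lifting is assumed to live in EASTL_ARCH_ATOMIC_X86_OP_64_IMPL / _128_IMPL (defined in arch_x86.h, not shown in this hunk), which presumably wrap the hook in a compare-exchange retry loop. A hedged sketch of how such a hook slots in, with the builtin CAS standing in for the real macro:

	#include <cstdint>

	// Sketch only: fetch_add returns the value observed before the addition,
	// which is why the post-compute step above is a NOP.
	static uint64_t fetch_add_64_sketch(uint64_t* ptr, uint64_t val)
	{
		uint64_t observed = __atomic_load_n(ptr, __ATOMIC_RELAXED);
		uint64_t desired;
		do
		{
			desired = observed + val; // PRE_COMPUTE_DESIRED: build the value to install
		} while (!__atomic_compare_exchange_n(ptr, &observed, desired,
		                                      /*weak*/ true,
		                                      __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
		return observed; // the old value is the result
	}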
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h
new file mode 100644
index 0000000..fd7dbb9
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_and.h
@@ -0,0 +1,90 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_AND_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_AND_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) & (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) & (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_AND_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_AND_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_AND_H */
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h
new file mode 100644
index 0000000..50da6db
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_or.h
@@ -0,0 +1,90 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_OR_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_OR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) | (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) | (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_OR_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_OR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_OR_H */
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h
new file mode 100644
index 0000000..77bee83
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_sub.h
@@ -0,0 +1,90 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_SUB_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_SUB_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) - (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) - (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_SUB_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_SUB_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_SUB_H */
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h
new file mode 100644
index 0000000..2e76b0c
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_fetch_xor.h
@@ -0,0 +1,90 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_XOR_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_XOR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) ^ (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) ^ (val))
+
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_FETCH_XOR_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_FETCH_XOR_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_FETCH_XOR_H */
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h
new file mode 100644
index 0000000..b044190
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_load.h
@@ -0,0 +1,168 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
+#if defined(EA_COMPILER_MSVC)
+
+
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1920) // >= VS2019
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_N(integralType, bits, type, ret, ptr) \
+ { \
+ integralType retIntegral; \
+ retIntegral = EA_PREPROCESSOR_JOIN(__iso_volatile_load, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr))); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
+
+ #else
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_N(integralType, bits, type, ret, ptr) \
+ { \
+ integralType retIntegral; \
+ retIntegral = (*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)))); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
+
+ #endif
+
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, MemoryOrder) \
+ { \
+ EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 expected{0, 0}; \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, expected); \
+ \
+ bool cmpxchgRetBool; EA_UNUSED(cmpxchgRetBool); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRetBool, ptr, &(ret), ret); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_N(__int8, 8, type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_N(__int16, 16, type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_N(__int32, 32, type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_N(__int64, 64, type, ret, ptr)
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, RELAXED)
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, ACQUIRE)
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_8(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_16(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_32(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_64(type, ret, ptr); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, SEQ_CST)
+
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+	/**
+	 * NOTE:
+	 *
+	 * The 128-bit cmpxchg inline assembly uses a sete to produce the success boolean, and because
+	 * the compiler cannot reason about what happens inside an asm block, that store is never
+	 * dead-store eliminated even though we do not care whether the cmpxchg succeeded.
+	 * This variant therefore does only the minimum work required to perform an atomic load.
+	 */
+ #define EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, MemoryOrder) \
+ { \
+ EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 expected = 0; \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, expected); \
+ \
+ /* Compare RDX:RAX with m128. If equal, set ZF and load RCX:RBX into m128. Else, clear ZF and load m128 into RDX:RAX. */ \
+ __asm__ __volatile__ ("lock; cmpxchg16b %2" /* cmpxchg16b sets/clears ZF */ \
+ /* Output Operands */ \
+ : "=a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "=d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]), \
+ "+m"(*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(__uint128_t, (ptr)))) \
+ /* Input Operands */ \
+ : "b"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "c"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]), \
+ "a"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[0]), "d"((EASTL_ATOMIC_TYPE_CAST(uint64_t, &(ret)))[1]) \
+ /* Clobbers */ \
+ : "memory", "cc"); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, RELAXED)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, ACQUIRE)
+
+ #define EASTL_ARCH_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \
+ EASTL_ARCH_ATOMIC_X86_LOAD_128(type, ret, ptr, SEQ_CST)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_LOAD_H */
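x86-64 has no plain 16-byte atomic load instruction, so both paths above synthesize one from cmpxchg16b: issue a compare-exchange whose desired value equals the expected guess, and either way walk away with an atomic snapshot. A small sketch of the idea using the GCC/Clang builtins (illustrative, not the library's asm); note that, like the real thing, it requires the target memory to be writable:

	// If the guess matches, the same bytes are written back; if it does not,
	// the CAS fails and deposits the current contents into expected.
	static __int128 load_128_sketch(__int128* ptr)
	{
		__int128 expected = 0; // arbitrary guess
		__atomic_compare_exchange_n(ptr, &expected, expected,
		                            /*weak*/ false,
		                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		return expected; // an atomic snapshot of *ptr
	}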
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h
new file mode 100644
index 0000000..1d1c8fc
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_memory_barrier.h
@@ -0,0 +1,104 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_MEMORY_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_MEMORY_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_MB()
+//
+#if defined(EA_COMPILER_MSVC)
+
+	/**
+	 * NOTE:
+	 * While it makes no sense for a hardware memory barrier not to imply a compiler barrier,
+	 * the MSVC docs do not explicitly state that it does, so it is better to be safe than to
+	 * chase down hard-to-find bugs caused by the compiler deciding to reorder things.
+	 */
+
+ #if 1
+
+ // 4459 : declaration of 'identifier' hides global declaration
+ // 4456 : declaration of 'identifier' hides previous local declaration
+ #define EASTL_ARCH_ATOMIC_CPU_MB() \
+ { \
+ EA_DISABLE_VC_WARNING(4459 4456); \
+ volatile long _; \
+ _InterlockedExchangeAdd(&_, 0); \
+ EA_RESTORE_VC_WARNING(); \
+ }
+
+ #else
+
+ #define EASTL_ARCH_ATOMIC_CPU_MB() \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ _mm_mfence(); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #endif
+
+#elif defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)
+
+	/**
+	 * NOTE:
+	 *
+	 * mfence orders all loads/stores to/from all memory types.
+	 * We only care about ordinary cacheable memory, so a lighter-weight locked instruction
+	 * is far faster than an mfence for obtaining a full memory barrier.
+	 * A lock; addl against the top of the stack is a good choice because:
+	 *     the stack is distinct for every thread, which prevents false sharing
+	 *     that cacheline is most likely cache-hot
+	 *
+	 * We intentionally target memory below the stack pointer to avoid false read-after-write register
+	 * dependencies in cases where the compiler reads from the stack pointer right after the lock; addl instruction.
+	 *
+	 * Accounting for red zones or cacheline sizes does not provide any extra benefit.
+	 */
+
+ #if defined(EA_PROCESSOR_X86)
+
+ #define EASTL_ARCH_ATOMIC_CPU_MB() \
+ __asm__ __volatile__ ("lock; addl $0, -4(%%esp)" ::: "memory", "cc")
+
+ #elif defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_ARCH_ATOMIC_CPU_MB() \
+ __asm__ __volatile__ ("lock; addl $0, -8(%%rsp)" ::: "memory", "cc")
+
+ #else
+
+ #define EASTL_ARCH_ATOMIC_CPU_MB() \
+ __asm__ __volatile__ ("mfence" ::: "memory")
+
+ #endif
+
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_WMB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_WMB() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_CPU_RMB()
+//
+#define EASTL_ARCH_ATOMIC_CPU_RMB() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_MEMORY_BARRIER_H */
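The classic case that needs this full barrier is a store followed by a load of a different location, which is the one reordering x86's TSO model permits. A minimal sketch of that pattern around the lock; addl idiom above (thread B mirrors it with the two flags swapped); the flag names are invented for illustration:

	// Any locked read-modify-write is a full fence on x86; EASTL targets a slot
	// just below the stack pointer so the cacheline is thread-private and hot.
	static int x_flag = 0;
	static int y_flag = 0;

	static inline void cpu_full_barrier_sketch()
	{
		__asm__ __volatile__ ("lock; addl $0, -8(%%rsp)" ::: "memory", "cc");
	}

	static int thread_a_sketch()
	{
		__atomic_store_n(&x_flag, 1, __ATOMIC_RELAXED);
		cpu_full_barrier_sketch();                          // forbids the store/load reordering
		return __atomic_load_n(&y_flag, __ATOMIC_RELAXED);  // with the barrier, A and B cannot both read 0
	}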
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h
new file mode 100644
index 0000000..751cc2a
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_or_fetch.h
@@ -0,0 +1,96 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_OR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_OR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) | (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) | (val))
+
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) | (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) | (val))
+
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_OR_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_OR_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_OR_FETCH_H */
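Compared with the fetch_or file earlier in this hunk, the only difference is the POST_COMPUTE_RET hook: or_fetch must report the updated value, so the operation is re-applied to the previously observed value once the CAS loop finishes. A one-function sketch of that distinction, with the builtin standing in for the underlying loop:

	#include <cstdint>

	static uint64_t or_fetch_64_sketch(uint64_t* ptr, uint64_t val)
	{
		uint64_t prevObserved = __atomic_fetch_or(ptr, val, __ATOMIC_SEQ_CST); // loop yields the old value
		return prevObserved | val; // POST_COMPUTE_RET: convert "old value" into "new value"
	}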
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h
new file mode 100644
index 0000000..397ff5f
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_store.h
@@ -0,0 +1,171 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_STORE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_STORE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_STORE_*_N(type, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC)
+
+
+ #if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1920) // >= VS2019
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_N(integralType, bits, type, ptr, val) \
+ EA_PREPROCESSOR_JOIN(__iso_volatile_store, bits)(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)))
+
+ #else
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_N(integralType, bits, type, ptr, val) \
+ { \
+ integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \
+ \
+ (*(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)))) = valIntegral; \
+ }
+
+ #endif
+
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, MemoryOrder) \
+ { \
+ type exchange128; EA_UNUSED(exchange128); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_8(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_N(__int8, 8, type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_16(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_N(__int16, 16, type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_32(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_N(__int32, 32, type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_N(__int64, 64, type, ptr, val)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_8(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_8(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_16(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_16(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_32(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_32(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_64(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELAXED)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_8(type, ptr, val) \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ EASTL_ARCH_ATOMIC_X86_STORE_8(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_16(type, ptr, val) \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ EASTL_ARCH_ATOMIC_X86_STORE_16(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_32(type, ptr, val) \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ EASTL_ARCH_ATOMIC_X86_STORE_32(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_64(type, ptr, val) \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELEASE)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \
+ { \
+ type exchange8; EA_UNUSED(exchange8); \
+ EASTL_ATOMIC_EXCHANGE_SEQ_CST_8(type, exchange8, ptr, val); \
+ }
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \
+ { \
+ type exchange16; EA_UNUSED(exchange16); \
+ EASTL_ATOMIC_EXCHANGE_SEQ_CST_16(type, exchange16, ptr, val); \
+ }
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \
+ { \
+ type exchange32; EA_UNUSED(exchange32); \
+ EASTL_ATOMIC_EXCHANGE_SEQ_CST_32(type, exchange32, ptr, val); \
+ }
+
+
+	/**
+	 * NOTE:
+	 *
+	 * Since a 64-bit exchange is implemented as a cmpxchg8b loop on 32-bit x86, it is
+	 * faster to just do a mov; mfence.
+	 */
+ #if defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \
+ EASTL_ATOMIC_COMPILER_BARRIER(); \
+ EASTL_ARCH_ATOMIC_X86_STORE_64(type, ptr, val); \
+ EASTL_ATOMIC_CPU_MB()
+
+
+ #elif defined(EA_PROCESSOR_X86_64)
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \
+ { \
+ type exchange64; EA_UNUSED(exchange64); \
+ EASTL_ATOMIC_EXCHANGE_SEQ_CST_64(type, exchange64, ptr, val); \
+ }
+
+
+ #endif
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, SEQ_CST)
+
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, MemoryOrder) \
+ { \
+ type exchange128; EA_UNUSED(exchange128); \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_EXCHANGE_, MemoryOrder), _128)(type, exchange128, ptr, val); \
+ }
+
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELAXED_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELAXED)
+
+ #define EASTL_ARCH_ATOMIC_STORE_RELEASE_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, RELEASE)
+
+ #define EASTL_ARCH_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_STORE_128(type, ptr, val, SEQ_CST)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_STORE_H */
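On x86 a plain mov is already a release store, so the interesting case above is SEQ_CST: it is recovered either by swapping the value in with an implicitly locked xchg and discarding the result, or (for 64-bit stores on 32-bit x86) by following the mov with a full fence. A sketch of both strategies using the builtins rather than the macros themselves:

	#include <cstdint>

	static void store_seq_cst_xchg_sketch(uint32_t* ptr, uint32_t val)
	{
		(void)__atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST); // xchg with memory is implicitly locked
	}

	static void store_seq_cst_mov_fence_sketch(uint64_t* ptr, uint64_t val)
	{
		__atomic_store_n(ptr, val, __ATOMIC_RELEASE);  // plain mov
		__asm__ __volatile__ ("mfence" ::: "memory");  // upgrade the store to seq_cst
	}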
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h
new file mode 100644
index 0000000..124b586
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_sub_fetch.h
@@ -0,0 +1,96 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_SUB_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_SUB_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) - (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) - (val))
+
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) - (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) - (val))
+
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_SUB_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_SUB_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_SUB_FETCH_H */
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h
new file mode 100644
index 0000000..fe3bd58
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_thread_fence.h
@@ -0,0 +1,42 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_THREAD_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_THREAD_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_THREAD_FENCE_*()
+//
+#if defined(EA_COMPILER_MSVC)
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELAXED()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQUIRE() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_RELEASE() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_ACQ_REL() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+#endif
+
+
+#if defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)
+
+ #define EASTL_ARCH_ATOMIC_THREAD_FENCE_SEQ_CST() \
+ EASTL_ATOMIC_CPU_MB()
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_THREAD_FENCE_H */
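Because x86 is TSO, every fence short of SEQ_CST only needs to stop the compiler from moving things around, which is exactly what the mapping above encodes; only the SEQ_CST fence pays for a real CPU barrier. A tiny illustration with the standard library equivalents (behaviour stated for typical x86 codegen, not guaranteed by this header):

	#include <atomic>

	static void fence_mapping_sketch()
	{
		std::atomic_thread_fence(std::memory_order_release); // typically emits no instruction on x86
		std::atomic_thread_fence(std::memory_order_seq_cst); // typically emits a locked op or mfence
	}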
diff --git a/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h b/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h
new file mode 100644
index 0000000..28cb958
--- /dev/null
+++ b/include/EASTL/internal/atomic/arch/x86/arch_x86_xor_fetch.h
@@ -0,0 +1,96 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_XOR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_ARCH_X86_XOR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ARCH_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)
+
+
+ #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) ^ (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) ^ (val))
+
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#if ((defined(EA_COMPILER_CLANG) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))
+
+
+ #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
+ ret = ((observed) ^ (val))
+
+ #define EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
+ ret = ((prevObserved) ^ (val))
+
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELAXED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQUIRE, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, RELEASE, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, ACQ_REL, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+ #define EASTL_ARCH_ATOMIC_XOR_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, SEQ_CST, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_PRE_COMPUTE_DESIRED, \
+ EASTL_ARCH_ATOMIC_X86_XOR_FETCH_POST_COMPUTE_RET)
+
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_XOR_FETCH_H */
diff --git a/include/EASTL/internal/atomic/atomic.h b/include/EASTL/internal/atomic/atomic.h
new file mode 100644
index 0000000..e1c5286
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic.h
@@ -0,0 +1,252 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_H
+#define EASTL_ATOMIC_INTERNAL_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/internal/memory_base.h>
+#include <EASTL/type_traits.h>
+
+#include "atomic_macros.h"
+#include "atomic_casts.h"
+
+#include "atomic_memory_order.h"
+#include "atomic_asserts.h"
+
+#include "atomic_size_aligned.h"
+#include "atomic_base_width.h"
+
+#include "atomic_integral.h"
+
+#include "atomic_pointer.h"
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/**
+ * NOTE:
+ *
+ * All of the actual implementation is done via the ATOMIC_MACROS in the compiler or arch sub folders.
+ * The C++ code is merely boilerplate around these macros, which actually implement the atomic operations,
+ * and that boilerplate is itself hidden behind macros.
+ * This may seem more complicated, but it is all meant to reduce copy-pasting and to ensure every operation
+ * ends up going through one macro that does the actual implementation.
+ * The reduced code duplication makes it easier to verify the implementation and reason about it.
+ * It ensures we do not have to re-implement the same code for compilers that do not support generic builtins, such as MSVC.
+ * It ensures that for compilers with separate intrinsics per width, the C++ boilerplate isn't copy-pasted, which would lead to programmer errors.
+ * It ensures that if we ever have to implement a new platform, only the low-level leaf macros have to be implemented; everything else is generated for you.
+ */
+
+
+#include "atomic_push_compiler_options.h"
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+ template <typename T>
+ struct is_atomic_lockfree_size
+ {
+ static EASTL_CPP17_INLINE_VARIABLE constexpr bool value = false ||
+ #if defined(EASTL_ATOMIC_HAS_8BIT)
+ sizeof(T) == 1 ||
+ #endif
+ #if defined(EASTL_ATOMIC_HAS_16BIT)
+ sizeof(T) == 2 ||
+ #endif
+ #if defined(EASTL_ATOMIC_HAS_32BIT)
+ sizeof(T) == 4 ||
+ #endif
+ #if defined(EASTL_ATOMIC_HAS_64BIT)
+ sizeof(T) == 8 ||
+ #endif
+ #if defined(EASTL_ATOMIC_HAS_128BIT)
+ sizeof(T) == 16 ||
+ #endif
+ false;
+ };
+
+
+ template <typename T>
+ struct is_user_type_suitable_for_primary_template
+ {
+ static EASTL_CPP17_INLINE_VARIABLE constexpr bool value = eastl::internal::is_atomic_lockfree_size<T>::value;
+ };
+
+
+ template <typename T>
+ using select_atomic_inherit_0 = typename eastl::conditional<eastl::is_same_v<bool, T> || eastl::internal::is_user_type_suitable_for_primary_template<T>::value,
+ eastl::internal::atomic_base_width<T>, /* True */
+ eastl::internal::atomic_invalid_type<T> /* False */
+ >::type;
+
+ template <typename T>
+ using select_atomic_inherit = select_atomic_inherit_0<T>;
+
+
+} // namespace internal
+
+
+#define EASTL_ATOMIC_CLASS_IMPL(type, base, valueType, differenceType) \
+ private: \
+ \
+ EASTL_ATOMIC_STATIC_ASSERT_TYPE(type); \
+ \
+ using Base = base; \
+ \
+ public: \
+ \
+ typedef valueType value_type; \
+ typedef differenceType difference_type; \
+ \
+ public: \
+ \
+ static EASTL_CPP17_INLINE_VARIABLE constexpr bool is_always_lock_free = eastl::internal::is_atomic_lockfree_size<type>::value; \
+ \
+ public: /* deleted ctors && assignment operators */ \
+ \
+ atomic(const atomic&) EA_NOEXCEPT = delete; \
+ \
+ atomic& operator=(const atomic&) EA_NOEXCEPT = delete; \
+ atomic& operator=(const atomic&) volatile EA_NOEXCEPT = delete; \
+ \
+ public: /* ctors */ \
+ \
+ EA_CONSTEXPR atomic(type desired) EA_NOEXCEPT \
+ : Base{ desired } \
+ { \
+ } \
+ \
+ EA_CONSTEXPR atomic() EA_NOEXCEPT_IF(eastl::is_nothrow_default_constructible_v<type>) = default; \
+ \
+ public: \
+ \
+ bool is_lock_free() const EA_NOEXCEPT \
+ { \
+ return eastl::internal::is_atomic_lockfree_size<type>::value; \
+ } \
+ \
+ bool is_lock_free() const volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(type); \
+ return false; \
+ }
+
+
+#define EASTL_ATOMIC_USING_ATOMIC_BASE(type) \
+ public: \
+ \
+ using Base::operator=; \
+ using Base::store; \
+ using Base::load; \
+ using Base::exchange; \
+ using Base::compare_exchange_weak; \
+ using Base::compare_exchange_strong; \
+ \
+ public: \
+ \
+ operator type() const volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ } \
+ \
+ operator type() const EA_NOEXCEPT \
+ { \
+ return load(eastl::memory_order_seq_cst); \
+ }
+
+
+#define EASTL_ATOMIC_USING_ATOMIC_INTEGRAL() \
+ public: \
+ \
+ using Base::fetch_add; \
+ using Base::add_fetch; \
+ \
+ using Base::fetch_sub; \
+ using Base::sub_fetch; \
+ \
+ using Base::fetch_and; \
+ using Base::and_fetch; \
+ \
+ using Base::fetch_or; \
+ using Base::or_fetch; \
+ \
+ using Base::fetch_xor; \
+ using Base::xor_fetch; \
+ \
+ using Base::operator++; \
+ using Base::operator--; \
+ using Base::operator+=; \
+ using Base::operator-=; \
+ using Base::operator&=; \
+ using Base::operator|=; \
+ using Base::operator^=;
+
+
+#define EASTL_ATOMIC_USING_ATOMIC_POINTER() \
+ public: \
+ \
+ using Base::fetch_add; \
+ using Base::add_fetch; \
+ using Base::fetch_sub; \
+ using Base::sub_fetch; \
+ \
+ using Base::operator++; \
+ using Base::operator--; \
+ using Base::operator+=; \
+ using Base::operator-=;
+
+
+template <typename T, typename = void>
+struct atomic : protected eastl::internal::select_atomic_inherit<T>
+{
+ EASTL_ATOMIC_CLASS_IMPL(T, eastl::internal::select_atomic_inherit<T>, T, T)
+
+ EASTL_ATOMIC_USING_ATOMIC_BASE(T)
+};
+
+
+template <typename T>
+struct atomic<T, eastl::enable_if_t<eastl::is_integral_v<T> && !eastl::is_same_v<bool, T>>> : protected eastl::internal::atomic_integral_width<T>
+{
+ EASTL_ATOMIC_CLASS_IMPL(T, eastl::internal::atomic_integral_width<T>, T, T)
+
+ EASTL_ATOMIC_USING_ATOMIC_BASE(T)
+
+ EASTL_ATOMIC_USING_ATOMIC_INTEGRAL()
+};
+
+
+template <typename T>
+struct atomic<T*> : protected eastl::internal::atomic_pointer_width<T*>
+{
+ EASTL_ATOMIC_CLASS_IMPL(T*, eastl::internal::atomic_pointer_width<T*>, T*, ptrdiff_t)
+
+ EASTL_ATOMIC_USING_ATOMIC_BASE(T*)
+
+ EASTL_ATOMIC_USING_ATOMIC_POINTER()
+};
+
+
+} // namespace eastl
+
+
+#include "atomic_pop_compiler_options.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_H */
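A minimal usage sketch of the three templates declared above, assuming the rest of EASTL's atomic headers are available: the integral partial specialization pulls in fetch_add and friends, while the primary template only exposes load/store/exchange/compare-exchange.

	#include <EASTL/atomic.h>

	struct Vec2 { float x, y; }; // 8 bytes: a lock-free width, so the primary template accepts it

	static void atomic_usage_sketch()
	{
		eastl::atomic<int> counter{0};
		counter.fetch_add(1, eastl::memory_order_relaxed);

		eastl::atomic<Vec2> point{Vec2{1.0f, 2.0f}};
		Vec2 expected = point.load(eastl::memory_order_acquire);
		point.compare_exchange_strong(expected, Vec2{3.0f, 4.0f});
	}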
diff --git a/include/EASTL/internal/atomic/atomic_asserts.h b/include/EASTL/internal/atomic/atomic_asserts.h
new file mode 100644
index 0000000..9324a47
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_asserts.h
@@ -0,0 +1,75 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_STATIC_ASSERTS_H
+#define EASTL_ATOMIC_INTERNAL_STATIC_ASSERTS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(type) \
+ static_assert(!eastl::is_same<type, type>::value, "eastl::atomic<T> : volatile eastl::atomic<T> is not what you expect! Read the docs in EASTL/atomic.h! Use the memory orders to access the atomic object!");
+
+#define EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(type) \
+ static_assert(!eastl::is_same<type, type>::value, "eastl::atomic<T> : invalid memory order for the given operation!");
+
+#define EASTL_ATOMIC_STATIC_ASSERT_TYPE(type) \
+ /* User Provided T must not be cv qualified */ \
+ static_assert(!eastl::is_const<type>::value, "eastl::atomic<T> : Template Typename T cannot be const!"); \
+ static_assert(!eastl::is_volatile<type>::value, "eastl::atomic<T> : Template Typename T cannot be volatile! Use the memory orders to access the underlying type for the guarantees you need."); \
+ /* T must satisfy StandardLayoutType */ \
+ static_assert(eastl::is_standard_layout<type>::value, "eastl::atomic<T> : Must have standard layout!"); \
+ /* T must be TriviallyCopyable but it does not have to be TriviallyConstructible */ \
+	static_assert(eastl::is_trivially_copyable<type>::value, "eastl::atomic<T> : Template Typename T must be trivially copyable!"); \
+ static_assert(eastl::is_copy_constructible<type>::value, "eastl::atomic<T> : Template Typename T must be copy constructible!"); \
+ static_assert(eastl::is_move_constructible<type>::value, "eastl::atomic<T> : Template Typename T must be move constructible!"); \
+ static_assert(eastl::is_copy_assignable<type>::value, "eastl::atomic<T> : Template Typename T must be copy assignable!"); \
+ static_assert(eastl::is_move_assignable<type>::value, "eastl::atomic<T> : Template Typename T must be move assignable!"); \
+ static_assert(eastl::is_trivially_destructible<type>::value, "eastl::atomic<T> : Must be trivially destructible!"); \
+ static_assert(eastl::internal::is_atomic_lockfree_size<type>::value, "eastl::atomic<T> : Template Typename T must be a lockfree size!");
+
+#define EASTL_ATOMIC_STATIC_ASSERT_TYPE_IS_OBJECT(type) \
+ static_assert(eastl::is_object<type>::value, "eastl::atomic<T> : Template Typename T must be an object type!");
+
+#define EASTL_ATOMIC_ASSERT_ALIGNED(alignment) \
+ EASTL_ASSERT((alignment & (alignment - 1)) == 0); \
+ EASTL_ASSERT((reinterpret_cast<uintptr_t>(this) & (alignment - 1)) == 0)
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+ template <typename T>
+ struct atomic_invalid_type
+ {
+		/**
+		 * class Test { int i; int j; int k; }; // sizeof(Test) == 12 bytes (96 bits)
+		 *
+		 * std::atomic allows non-primitive types to be used for the template type.
+		 * For types that cannot fit into the lock-free size of the target platform, such as
+		 * std::atomic<Test>, the API silently degrades to locking, leading to performance traps.
+		 *
+		 * If this static_assert() fired, it means your template type T is larger than any atomic
+		 * instruction supported on the given platform.
+		 */
+
+ static_assert(!eastl::is_same<T, T>::value, "eastl::atomic<T> : invalid template type T!");
+ };
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_STATIC_ASSERTS_H */
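The comment above is easiest to see with a concrete type that exceeds the largest lock-free width (16 bytes on x86-64 with cmpxchg16b); instantiating eastl::atomic with such a type selects atomic_invalid_type and the static_assert fires, instead of silently degrading to a lock. For example:

	struct TooBig { long long a, b, c; }; // 24 bytes

	// eastl::atomic<TooBig> wontCompile; // fires: "eastl::atomic<T> : invalid template type T!"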
diff --git a/include/EASTL/internal/atomic/atomic_base_width.h b/include/EASTL/internal/atomic/atomic_base_width.h
new file mode 100644
index 0000000..ca47618
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_base_width.h
@@ -0,0 +1,346 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_BASE_WIDTH_H
+#define EASTL_ATOMIC_INTERNAL_BASE_WIDTH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include "atomic_push_compiler_options.h"
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+ template <typename T, unsigned width = sizeof(T)>
+ struct atomic_base_width;
+
+ /**
+ * NOTE:
+ *
+ * T does not have to be trivially default constructible but it still
+ * has to be a trivially copyable type for the primary atomic template.
+ * Thus we must type pun into whatever storage type of the given fixed width
+ * the platform designates. This ensures T does not have to be trivially constructible.
+ */
+
+#define EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) \
+ EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_FIXED_WIDTH_TYPE_, bits)
+
+
+#define EASTL_ATOMIC_STORE_FUNC_IMPL(op, bits) \
+ EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) fixedWidthDesired = EASTL_ATOMIC_TYPE_PUN_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), desired); \
+ EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \
+ EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress()), \
+ fixedWidthDesired)
+
+
+#define EASTL_ATOMIC_LOAD_FUNC_IMPL(op, bits) \
+ EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) retVal; \
+ EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \
+ retVal, \
+ EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress())); \
+ return EASTL_ATOMIC_TYPE_PUN_CAST(T, retVal);
+
+
+#define EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(op, bits) \
+ EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) retVal; \
+ EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) fixedWidthDesired = EASTL_ATOMIC_TYPE_PUN_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), desired); \
+ EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \
+ retVal, \
+ EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress()), \
+ fixedWidthDesired); \
+ return EASTL_ATOMIC_TYPE_PUN_CAST(T, retVal);
+
+
+#define EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(op, bits) \
+ bool retVal; \
+ EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits) fixedWidthDesired = EASTL_ATOMIC_TYPE_PUN_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), desired); \
+ EA_PREPROCESSOR_JOIN(op, bits)(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), \
+ retVal, \
+ EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), this->GetAtomicAddress()), \
+ EASTL_ATOMIC_TYPE_CAST(EASTL_ATOMIC_BASE_FIXED_WIDTH_TYPE(bits), &expected), \
+ fixedWidthDesired); \
+ return retVal;
+
+
+#define EASTL_ATOMIC_BASE_OP_JOIN(op, Order) \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, op), Order)
+
+
+#define EASTL_ATOMIC_BASE_CMPXCHG_FUNCS_IMPL(funcName, cmpxchgOp, bits) \
+ using Base::funcName; \
+ \
+ bool funcName(T& expected, T desired) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELAXED_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQUIRE_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_release_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELEASE_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQ_REL_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_relaxed_s, \
+ eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELAXED_RELAXED_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_acquire_s, \
+ eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQUIRE_RELAXED_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_acquire_s, \
+ eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQUIRE_ACQUIRE_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_release_s, \
+ eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _RELEASE_RELAXED_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_acq_rel_s, \
+ eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQ_REL_RELAXED_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_acq_rel_s, \
+ eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _ACQ_REL_ACQUIRE_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_seq_cst_s, \
+ eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_RELAXED_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_seq_cst_s, \
+ eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_ACQUIRE_), bits); \
+ } \
+ \
+ bool funcName(T& expected, T desired, \
+ eastl::internal::memory_order_seq_cst_s, \
+ eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_CMPXCHG_FUNC_IMPL(EASTL_ATOMIC_BASE_OP_JOIN(cmpxchgOp, _SEQ_CST_SEQ_CST_), bits); \
+ }
+
+#define EASTL_ATOMIC_BASE_CMPXCHG_WEAK_FUNCS_IMPL(bits) \
+ EASTL_ATOMIC_BASE_CMPXCHG_FUNCS_IMPL(compare_exchange_weak, CMPXCHG_WEAK, bits)
+
+#define EASTL_ATOMIC_BASE_CMPXCHG_STRONG_FUNCS_IMPL(bits) \
+ EASTL_ATOMIC_BASE_CMPXCHG_FUNCS_IMPL(compare_exchange_strong, CMPXCHG_STRONG, bits)
+
+
+#define EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(bytes, bits) \
+ template <typename T> \
+ struct atomic_base_width<T, bytes> : public atomic_size_aligned<T> \
+ { \
+ private: \
+ \
+ static_assert(EA_ALIGN_OF(atomic_size_aligned<T>) == bytes, "eastl::atomic<T> must be sizeof(T) aligned!"); \
+ static_assert(EA_ALIGN_OF(atomic_size_aligned<T>) == sizeof(T), "eastl::atomic<T> must be sizeof(T) aligned!"); \
+ using Base = atomic_size_aligned<T>; \
+ \
+ public: /* ctors */ \
+ \
+ EA_CONSTEXPR atomic_base_width(T desired) EA_NOEXCEPT \
+ : Base{ desired } \
+ { \
+ } \
+ \
+ EA_CONSTEXPR atomic_base_width() EA_NOEXCEPT_IF(eastl::is_nothrow_default_constructible_v<T>) = default; \
+ \
+ atomic_base_width(const atomic_base_width&) EA_NOEXCEPT = delete; \
+ \
+ public: /* store */ \
+ \
+ using Base::store; \
+ \
+ void store(T desired) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_SEQ_CST_, bits); \
+ } \
+ \
+ void store(T desired, eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_RELAXED_, bits); \
+ } \
+ \
+ void store(T desired, eastl::internal::memory_order_release_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_RELEASE_, bits); \
+ } \
+ \
+ void store(T desired, eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STORE_FUNC_IMPL(EASTL_ATOMIC_STORE_SEQ_CST_, bits); \
+ } \
+ \
+ public: /* load */ \
+ \
+ using Base::load; \
+ \
+ T load() const EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_SEQ_CST_, bits); \
+ } \
+ \
+ T load(eastl::internal::memory_order_relaxed_s) const EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_RELAXED_, bits); \
+ } \
+ \
+ T load(eastl::internal::memory_order_acquire_s) const EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_ACQUIRE_, bits); \
+ } \
+ \
+ T load(eastl::internal::memory_order_seq_cst_s) const EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_LOAD_FUNC_IMPL(EASTL_ATOMIC_LOAD_SEQ_CST_, bits); \
+ } \
+ \
+ public: /* exchange */ \
+ \
+ using Base::exchange; \
+ \
+ T exchange(T desired) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_SEQ_CST_, bits); \
+ } \
+ \
+ T exchange(T desired, eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_RELAXED_, bits); \
+ } \
+ \
+ T exchange(T desired, eastl::internal::memory_order_acquire_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_ACQUIRE_, bits); \
+ } \
+ \
+ T exchange(T desired, eastl::internal::memory_order_release_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_RELEASE_, bits); \
+ } \
+ \
+ T exchange(T desired, eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_ACQ_REL_, bits); \
+ } \
+ \
+ T exchange(T desired, eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_EXCHANGE_FUNC_IMPL(EASTL_ATOMIC_EXCHANGE_SEQ_CST_, bits); \
+ } \
+ \
+ public: /* compare_exchange_weak */ \
+ \
+ EASTL_ATOMIC_BASE_CMPXCHG_WEAK_FUNCS_IMPL(bits) \
+ \
+ public: /* compare_exchange_strong */ \
+ \
+ EASTL_ATOMIC_BASE_CMPXCHG_STRONG_FUNCS_IMPL(bits) \
+ \
+ public: /* assignment operator */ \
+ \
+ using Base::operator=; \
+ \
+ T operator=(T desired) EA_NOEXCEPT \
+ { \
+ store(desired, eastl::memory_order_seq_cst); \
+ return desired; \
+ } \
+ \
+ atomic_base_width& operator=(const atomic_base_width&) EA_NOEXCEPT = delete; \
+ atomic_base_width& operator=(const atomic_base_width&) volatile EA_NOEXCEPT = delete; \
+ \
+ };
+
+
+#if defined(EASTL_ATOMIC_HAS_8BIT)
+ EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(1, 8)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_16BIT)
+ EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(2, 16)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+ EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(4, 32)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+ EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(8, 64)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+ EASTL_ATOMIC_BASE_WIDTH_SPECIALIZE(16, 128)
+#endif
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+#include "atomic_pop_compiler_options.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_BASE_WIDTH_H */
diff --git a/include/EASTL/internal/atomic/atomic_casts.h b/include/EASTL/internal/atomic/atomic_casts.h
new file mode 100644
index 0000000..54b9ed2
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_casts.h
@@ -0,0 +1,190 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_CASTS_H
+#define EASTL_ATOMIC_INTERNAL_CASTS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include <EASTL/internal/type_transformations.h>
+
+
+#include <string.h>
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+template <typename T>
+EASTL_FORCE_INLINE volatile T* AtomicVolatileCast(T* ptr) EA_NOEXCEPT
+{
+ static_assert(!eastl::is_volatile<volatile T*>::value, "eastl::atomic<T> : pointer must not be volatile, the pointed to type must be volatile!");
+ static_assert(eastl::is_volatile<volatile T>::value, "eastl::atomic<T> : the pointed to type must be volatile!");
+
+ return reinterpret_cast<volatile T*>(ptr);
+}
+
+
+/**
+ * NOTE:
+ *
+ * Some compiler intrinsics do not operate on pointer types, thus pointers
+ * used in atomic operations must be cast to a suitably sized unsigned
+ * integral type.
+ *
+ * Some compiler intrinsics aren't generic, thus structs must also
+ * be cast to an appropriately sized unsigned integral type.
+ *
+ * As an example, atomic operations on an int* might have to be cast to a
+ * uint64_t on a platform with 8-byte pointers.
+ *
+ * Also, when doing an atomic operation on a struct, we must ensure that we
+ * observe the whole struct as one atomic unit with no tearing between the members.
+ * A load of a struct with two uint32_t members must be one uint64_t load,
+ * not two separate uint32_t loads, thus it is cast to the suitably sized
+ * unsigned integral type.
+ */
+template <typename Integral, typename T>
+EASTL_FORCE_INLINE volatile Integral* AtomicVolatileIntegralCast(T* ptr) EA_NOEXCEPT
+{
+ static_assert(!eastl::is_volatile<volatile Integral*>::value, "eastl::atomic<T> : pointer must not be volatile, the pointed to type must be volatile!");
+ static_assert(eastl::is_volatile<volatile Integral>::value, "eastl::atomic<T> : the pointed to type must be volatile!");
+ static_assert(eastl::is_integral<Integral>::value, "eastl::atomic<T> : Integral cast must cast to an Integral type!");
+ static_assert(sizeof(Integral) == sizeof(T), "eastl::atomic<T> : Integral and T must be same size for casting!");
+
+ return reinterpret_cast<volatile Integral*>(ptr);
+}
+
+template <typename Integral, typename T>
+EASTL_FORCE_INLINE Integral* AtomicIntegralCast(T* ptr) EA_NOEXCEPT
+{
+ static_assert(eastl::is_integral<Integral>::value, "eastl::atomic<T> : Integral cast must cast to an Integral type!");
+ static_assert(sizeof(Integral) == sizeof(T), "eastl::atomic<T> : Integral and T must be same size for casting!");
+
+ return reinterpret_cast<Integral*>(ptr);
+}
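+
+
+/**
+ * Example (illustrative only, assuming a platform with 8-byte pointers):
+ *
+ *     void* ptrValue = nullptr;
+ *     // The pointer object itself can now be handed to an intrinsic that only
+ *     // understands 64-bit integers:
+ *     uint64_t* asIntegral = AtomicIntegralCast<uint64_t>(&ptrValue);
+ */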
+
+
+/**
+ * NOTE:
+ *
+ * These casts are meant to be used with unions or structs of larger types that must be cast
+ * down to smaller integral types, as with 128-bit atomics and MSVC intrinsics.
+ *
+ * struct Foo128 { __int64 array[2]; }; can be cast to a __int64*
+ * since a pointer to Foo128 is a pointer to its first member.
+ */
+template <typename ToType, typename FromType>
+EASTL_FORCE_INLINE volatile ToType* AtomicVolatileTypeCast(FromType* ptr) EA_NOEXCEPT
+{
+ static_assert(!eastl::is_volatile<volatile ToType*>::value, "eastl::atomic<T> : pointer must not be volatile, the pointed to type must be volatile!");
+ static_assert(eastl::is_volatile<volatile ToType>::value, "eastl::atomic<T> : the pointed to type must be volatile!");
+
+ return reinterpret_cast<volatile ToType*>(ptr);
+}
+
+template <typename ToType, typename FromType>
+EASTL_FORCE_INLINE ToType* AtomicTypeCast(FromType* ptr) EA_NOEXCEPT
+{
+ return reinterpret_cast<ToType*>(ptr);
+}
+
+
+/**
+ * NOTE:
+ *
+ * This is compiler-guaranteed safe type punning.
+ * This is useful when dealing with user defined structs such as
+ * struct Test { uint32_t a; uint32_t b; };
+ *
+ * Example (given an object "test" of type Test):
+ *     uint64_t atomicLoad = *((volatile uint64_t*)&test);
+ *     Test load = AtomicTypePunCast<Test, uint64_t>(atomicLoad);
+ *
+ *     uint64_t comparand = AtomicTypePunCast<uint64_t, Test>(test);
+ *     cmpxchg(&test, comparand, desired);
+ *
+ * This can be implemented in many different ways depending on the compiler, such
+ * as through a union, memcpy, reinterpret_cast<Test&>(atomicLoad), etc.
+ */
+template <typename Pun, typename T, eastl::enable_if_t<!eastl::is_same_v<Pun, T>, int> = 0>
+EASTL_FORCE_INLINE Pun AtomicTypePunCast(const T& fromType) EA_NOEXCEPT
+{
+ static_assert(sizeof(Pun) == sizeof(T), "eastl::atomic<T> : Pun and T must be the same size for type punning!");
+
+ /**
+ * aligned_storage ensures we can TypePun objects that aren't trivially default constructible
+ * but still trivially copyable.
+ */
+ typename eastl::aligned_storage<sizeof(Pun), alignof(Pun)>::type ret;
+ memcpy(eastl::addressof(ret), eastl::addressof(fromType), sizeof(Pun));
+ return reinterpret_cast<Pun&>(ret);
+}
+
+template <typename Pun, typename T, eastl::enable_if_t<eastl::is_same_v<Pun, T>, int> = 0>
+EASTL_FORCE_INLINE Pun AtomicTypePunCast(const T& fromType) EA_NOEXCEPT
+{
+ return fromType;
+}
+
+
+template <typename T>
+EASTL_FORCE_INLINE T AtomicNegateOperand(T val) EA_NOEXCEPT
+{
+ static_assert(eastl::is_integral<T>::value, "eastl::atomic<T> : Integral Negation must be an Integral type!");
+ static_assert(!eastl::is_volatile<T>::value, "eastl::atomic<T> : T must not be volatile!");
+
+ return static_cast<T>(0U - static_cast<eastl::make_unsigned_t<T>>(val));
+}
+
+EASTL_FORCE_INLINE ptrdiff_t AtomicNegateOperand(ptrdiff_t val) EA_NOEXCEPT
+{
+ return -val;
+}
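+
+
+/**
+ * Illustrative note (sketch, not part of the implementation): negating the operand
+ * allows a fetch_sub to be expressed via a fetch_add style operation when only the
+ * latter is available, e.g.
+ *
+ *     fetch_add(ptr, AtomicNegateOperand(val)); // behaves as fetch_sub(ptr, val)
+ *
+ * The unsigned arithmetic in the integral overload avoids signed overflow UB when
+ * val is the minimum value of T.
+ */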
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+/**
+ * NOTE:
+ *
+ * These macros are meant to prevent inclusion hell.
+ * They also fit with the style of the rest of the atomic macro implementation.
+ */
+#define EASTL_ATOMIC_VOLATILE_CAST(ptr) \
+ eastl::internal::AtomicVolatileCast((ptr))
+
+#define EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(IntegralType, ptr) \
+ eastl::internal::AtomicVolatileIntegralCast<IntegralType>((ptr))
+
+#define EASTL_ATOMIC_INTEGRAL_CAST(IntegralType, ptr) \
+ eastl::internal::AtomicIntegralCast<IntegralType>((ptr))
+
+#define EASTL_ATOMIC_VOLATILE_TYPE_CAST(ToType, ptr) \
+ eastl::internal::AtomicVolatileTypeCast<ToType>((ptr))
+
+#define EASTL_ATOMIC_TYPE_CAST(ToType, ptr) \
+ eastl::internal::AtomicTypeCast<ToType>((ptr))
+
+#define EASTL_ATOMIC_TYPE_PUN_CAST(PunType, fromType) \
+ eastl::internal::AtomicTypePunCast<PunType>((fromType))
+
+#define EASTL_ATOMIC_NEGATE_OPERAND(val) \
+ eastl::internal::AtomicNegateOperand((val))
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_CASTS_H */
diff --git a/include/EASTL/internal/atomic/atomic_flag.h b/include/EASTL/internal/atomic/atomic_flag.h
new file mode 100644
index 0000000..e135d61
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_flag.h
@@ -0,0 +1,170 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ATOMIC_FLAG_H
+#define EASTL_ATOMIC_INTERNAL_ATOMIC_FLAG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include "atomic_push_compiler_options.h"
+
+
+namespace eastl
+{
+
+
+class atomic_flag
+{
+public: /* ctors */
+
+ EA_CONSTEXPR atomic_flag(bool desired) EA_NOEXCEPT
+ : mFlag{ desired }
+ {
+ }
+
+ EA_CONSTEXPR atomic_flag() EA_NOEXCEPT
+ : mFlag{ false }
+ {
+ }
+
+public: /* deleted ctors && assignment operators */
+
+ atomic_flag(const atomic_flag&) EA_NOEXCEPT = delete;
+
+ atomic_flag& operator=(const atomic_flag&) EA_NOEXCEPT = delete;
+ atomic_flag& operator=(const atomic_flag&) volatile EA_NOEXCEPT = delete;
+
+public: /* clear */
+
+ template <typename Order>
+ void clear(Order order) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order);
+ }
+
+ template <typename Order>
+ void clear(Order order) EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
+ }
+
+ void clear(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT
+ {
+ mFlag.store(false, eastl::memory_order_relaxed);
+ }
+
+ void clear(eastl::internal::memory_order_release_s) EA_NOEXCEPT
+ {
+ mFlag.store(false, eastl::memory_order_release);
+ }
+
+ void clear(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT
+ {
+ mFlag.store(false, eastl::memory_order_seq_cst);
+ }
+
+ void clear() EA_NOEXCEPT
+ {
+ mFlag.store(false, eastl::memory_order_seq_cst);
+ }
+
+public: /* test_and_set */
+
+ template <typename Order>
+ bool test_and_set(Order order) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order);
+ return false;
+ }
+
+ template <typename Order>
+ bool test_and_set(Order order) EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
+ return false;
+ }
+
+ bool test_and_set(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT
+ {
+ return mFlag.exchange(true, eastl::memory_order_relaxed);
+ }
+
+ bool test_and_set(eastl::internal::memory_order_acquire_s) EA_NOEXCEPT
+ {
+ return mFlag.exchange(true, eastl::memory_order_acquire);
+ }
+
+ bool test_and_set(eastl::internal::memory_order_release_s) EA_NOEXCEPT
+ {
+ return mFlag.exchange(true, eastl::memory_order_release);
+ }
+
+ bool test_and_set(eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT
+ {
+ return mFlag.exchange(true, eastl::memory_order_acq_rel);
+ }
+
+ bool test_and_set(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT
+ {
+ return mFlag.exchange(true, eastl::memory_order_seq_cst);
+ }
+
+ bool test_and_set() EA_NOEXCEPT
+ {
+ return mFlag.exchange(true, eastl::memory_order_seq_cst);
+ }
+
+public: /* test */
+
+ template <typename Order>
+ bool test(Order order) const volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(Order);
+ return false;
+ }
+
+ template <typename Order>
+ bool test(Order order) const EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
+ return false;
+ }
+
+ bool test(eastl::internal::memory_order_relaxed_s) const EA_NOEXCEPT
+ {
+ return mFlag.load(eastl::memory_order_relaxed);
+ }
+
+ bool test(eastl::internal::memory_order_acquire_s) const EA_NOEXCEPT
+ {
+ return mFlag.load(eastl::memory_order_acquire);
+ }
+
+ bool test(eastl::internal::memory_order_seq_cst_s) const EA_NOEXCEPT
+ {
+ return mFlag.load(eastl::memory_order_seq_cst);
+ }
+
+ bool test() const EA_NOEXCEPT
+ {
+ return mFlag.load(eastl::memory_order_seq_cst);
+ }
+
+private:
+
+ eastl::atomic<bool> mFlag;
+};
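+
+
+/**
+ * Usage sketch (illustrative only): atomic_flag can serve as a minimal spin lock.
+ *
+ *     eastl::atomic_flag lock;
+ *
+ *     void Enter()
+ *     {
+ *         while (lock.test_and_set(eastl::memory_order_acquire))
+ *         {
+ *             // spin until the flag was previously clear
+ *         }
+ *     }
+ *
+ *     void Leave()
+ *     {
+ *         lock.clear(eastl::memory_order_release);
+ *     }
+ */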
+
+
+} // namespace eastl
+
+
+#include "atomic_pop_compiler_options.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ATOMIC_FLAG_H */
diff --git a/include/EASTL/internal/atomic/atomic_flag_standalone.h b/include/EASTL/internal/atomic/atomic_flag_standalone.h
new file mode 100644
index 0000000..b5284be
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_flag_standalone.h
@@ -0,0 +1,69 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_FLAG_STANDALONE_H
+#define EASTL_ATOMIC_INTERNAL_FLAG_STANDALONE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+namespace eastl
+{
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// bool atomic_flag_test_and_set(eastl::atomic_flag*)
+//
+EASTL_FORCE_INLINE bool atomic_flag_test_and_set(eastl::atomic_flag* atomicObj) EA_NOEXCEPT
+{
+ return atomicObj->test_and_set();
+}
+
+template <typename Order>
+EASTL_FORCE_INLINE bool atomic_flag_test_and_set_explicit(eastl::atomic_flag* atomicObj, Order order)
+{
+ return atomicObj->test_and_set(order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// void atomic_flag_clear(eastl::atomic_flag*)
+//
+EASTL_FORCE_INLINE void atomic_flag_clear(eastl::atomic_flag* atomicObj)
+{
+ atomicObj->clear();
+}
+
+template <typename Order>
+EASTL_FORCE_INLINE void atomic_flag_clear_explicit(eastl::atomic_flag* atomicObj, Order order)
+{
+ atomicObj->clear(order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// bool atomic_flag_test(eastl::atomic_flag*)
+//
+EASTL_FORCE_INLINE bool atomic_flag_test(eastl::atomic_flag* atomicObj)
+{
+ return atomicObj->test();
+}
+
+template <typename Order>
+EASTL_FORCE_INLINE bool atomic_flag_test_explicit(eastl::atomic_flag* atomicObj, Order order)
+{
+ return atomicObj->test(order);
+}
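+
+
+/**
+ * Example (illustrative only):
+ *
+ *     eastl::atomic_flag flag;
+ *     bool wasSet = eastl::atomic_flag_test_and_set_explicit(&flag, eastl::memory_order_acquire);
+ *     eastl::atomic_flag_clear_explicit(&flag, eastl::memory_order_release);
+ */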
+
+
+} // namespace eastl
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_FLAG_STANDALONE_H */
diff --git a/include/EASTL/internal/atomic/atomic_integral.h b/include/EASTL/internal/atomic/atomic_integral.h
new file mode 100644
index 0000000..7c94db3
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_integral.h
@@ -0,0 +1,343 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_INTEGRAL_H
+#define EASTL_ATOMIC_INTERNAL_INTEGRAL_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include "atomic_push_compiler_options.h"
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(funcName) \
+ template <typename Order> \
+ T funcName(T arg, Order order) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
+ } \
+ \
+ template <typename Order> \
+ T funcName(T arg, Order order) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ } \
+ \
+ T funcName(T arg) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ }
+
+
+#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(operatorOp) \
+ T operator operatorOp() volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ } \
+ \
+ T operator operatorOp(int) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ }
+
+
+#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(operatorOp) \
+ T operator operatorOp(T arg) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ }
+
+
+ template <typename T, unsigned width = sizeof(T)>
+ struct atomic_integral_base : public atomic_base_width<T, width>
+ {
+ private:
+
+ using Base = atomic_base_width<T, width>;
+
+ public: /* ctors */
+
+ EA_CONSTEXPR atomic_integral_base(T desired) EA_NOEXCEPT
+ : Base{ desired }
+ {
+ }
+
+ EA_CONSTEXPR atomic_integral_base() EA_NOEXCEPT = default;
+
+ atomic_integral_base(const atomic_integral_base&) EA_NOEXCEPT = delete;
+
+ public: /* assignment operator */
+
+ using Base::operator=;
+
+ atomic_integral_base& operator=(const atomic_integral_base&) EA_NOEXCEPT = delete;
+ atomic_integral_base& operator=(const atomic_integral_base&) volatile EA_NOEXCEPT = delete;
+
+ public: /* fetch_add */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_add)
+
+ public: /* add_fetch */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(add_fetch)
+
+ public: /* fetch_sub */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_sub)
+
+ public: /* sub_fetch */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(sub_fetch)
+
+ public: /* fetch_and */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_and)
+
+ public: /* and_fetch */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(and_fetch)
+
+ public: /* fetch_or */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_or)
+
+ public: /* or_fetch */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(or_fetch)
+
+ public: /* fetch_xor */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_xor)
+
+ public: /* xor_fetch */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(xor_fetch)
+
+ public: /* operator++ && operator-- */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(++)
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(--)
+
+ public: /* operator+= && operator-= */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(+=)
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(-=)
+
+ public: /* operator&= */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(&=)
+
+ public: /* operator|= */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(|=)
+
+ public: /* operator^= */
+
+ EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(^=)
+
+ };
+
+
+ template <typename T, unsigned width = sizeof(T)>
+ struct atomic_integral_width;
+
+#define EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits) \
+ T retVal; \
+ EA_PREPROCESSOR_JOIN(op, bits)(T, retVal, this->GetAtomicAddress(), arg); \
+ return retVal;
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_IMPL(funcName, op, bits) \
+ T funcName(T arg) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits); \
+ }
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, orderType, op, bits) \
+ T funcName(T arg, orderType) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits); \
+ }
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, Order) \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, fetchOp), Order)
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(funcName, fetchOp, bits) \
+ using Base::funcName; \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_IMPL(funcName, EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_relaxed_s, \
+ EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _RELAXED_), bits) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acquire_s, \
+ EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _ACQUIRE_), bits) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_release_s, \
+ EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _RELEASE_), bits) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acq_rel_s, \
+ EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _ACQ_REL_), bits) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_seq_cst_s, \
+ EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits)
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(operatorOp, preFuncName, postFuncName) \
+ using Base::operator operatorOp; \
+ \
+ T operator operatorOp() EA_NOEXCEPT \
+ { \
+ return preFuncName(1, eastl::memory_order_seq_cst); \
+ } \
+ \
+ T operator operatorOp(int) EA_NOEXCEPT \
+ { \
+ return postFuncName(1, eastl::memory_order_seq_cst); \
+ }
+
+#define EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(operatorOp, funcName) \
+ using Base::operator operatorOp; \
+ \
+ T operator operatorOp(T arg) EA_NOEXCEPT \
+ { \
+ return funcName(arg, eastl::memory_order_seq_cst); \
+ }
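+
+
+/**
+ * Illustrative note (sketch, not part of the implementation): with the operator
+ * macros above, e.g.
+ *
+ *     EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(++, add_fetch, fetch_add)
+ *
+ * pre-increment expands to add_fetch(1, eastl::memory_order_seq_cst) and returns the
+ * new value, while post-increment expands to fetch_add(1, eastl::memory_order_seq_cst)
+ * and returns the previous value, matching the usual C++ operator semantics.
+ */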
+
+
+#define EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(bytes, bits) \
+ template <typename T> \
+ struct atomic_integral_width<T, bytes> : public atomic_integral_base<T, bytes> \
+ { \
+ private: \
+ \
+ using Base = atomic_integral_base<T, bytes>; \
+ \
+ public: /* ctors */ \
+ \
+ EA_CONSTEXPR atomic_integral_width(T desired) EA_NOEXCEPT \
+ : Base{ desired } \
+ { \
+ } \
+ \
+ EA_CONSTEXPR atomic_integral_width() EA_NOEXCEPT = default; \
+ \
+ atomic_integral_width(const atomic_integral_width&) EA_NOEXCEPT = delete; \
+ \
+ public: /* assignment operator */ \
+ \
+ using Base::operator=; \
+ \
+ atomic_integral_width& operator=(const atomic_integral_width&) EA_NOEXCEPT = delete; \
+ atomic_integral_width& operator=(const atomic_integral_width&) volatile EA_NOEXCEPT = delete; \
+ \
+ public: /* fetch_add */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_add, FETCH_ADD, bits) \
+ \
+ public: /* add_fetch */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(add_fetch, ADD_FETCH, bits) \
+ \
+ public: /* fetch_sub */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_sub, FETCH_SUB, bits) \
+ \
+ public: /* sub_fetch */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(sub_fetch, SUB_FETCH, bits) \
+ \
+ public: /* fetch_and */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_and, FETCH_AND, bits) \
+ \
+ public: /* and_fetch */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(and_fetch, AND_FETCH, bits) \
+ \
+ public: /* fetch_or */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_or, FETCH_OR, bits) \
+ \
+ public: /* or_fetch */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(or_fetch, OR_FETCH, bits) \
+ \
+ public: /* fetch_xor */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_xor, FETCH_XOR, bits) \
+ \
+ public: /* xor_fetch */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(xor_fetch, XOR_FETCH, bits) \
+ \
+ public: /* operator++ && operator-- */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(++, add_fetch, fetch_add) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(--, sub_fetch, fetch_sub) \
+ \
+ public: /* operator+= && operator-= */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(+=, add_fetch) \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(-=, sub_fetch) \
+ \
+ public: /* operator&= */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(&=, and_fetch) \
+ \
+ public: /* operator|= */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(|=, or_fetch) \
+ \
+ public: /* operator^= */ \
+ \
+ EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(^=, xor_fetch) \
+ \
+ };
+
+
+#if defined(EASTL_ATOMIC_HAS_8BIT)
+ EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(1, 8)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_16BIT)
+ EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(2, 16)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_32BIT)
+ EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(4, 32)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT)
+ EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(8, 64)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_128BIT)
+ EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(16, 128)
+#endif
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+#include "atomic_pop_compiler_options.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_INTEGRAL_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros.h b/include/EASTL/internal/atomic/atomic_macros.h
new file mode 100644
index 0000000..756a4b4
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros.h
@@ -0,0 +1,67 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// The reason for the implementation separating out into a compiler and architecture
+// folder is as follows.
+//
+// The compiler directory is meant to implement atomics using the compiler provided
+// intrinsics. This also implies that usually the same compiler intrinsic implementation
+// can be used for any architecture the compiler supports. If a compiler provides intrinsics
+// to support barriers or atomic operations, then that implementation should be in the
+// compiler directory.
+//
+// The arch directory is meant to manually implement atomics for a specific architecture
+// such as power or x86. There may be some compiler specific code in this directory because,
+// as an example, GCC inline assembly syntax may differ from that of other compilers.
+//
+// The arch directory can also be used to implement some atomic operations ourselves
+// if we deem the compiler provided implementation to be inefficient for the given
+// architecture or we need to do some things manually for a given compiler.
+//
+// The atomic_macros directory implements the macros that the rest of the atomic
+// library uses. These macros will expand to either the compiler or arch implemented
+// macro. The arch implemented macro is given priority over the compiler implemented
+// macro if both are implemented; otherwise whichever one is implemented is chosen,
+// or an error is emitted if neither is implemented.
+//
+// The implementation being all macros has a couple of nice side effects as well.
+//
+// 1. All the implementation ends up funneling into one low level macro implementation
+// which makes it easy to verify correctness, reduce copy-paste errors and differences
+// in various platform implementations.
+//
+// 2. Allows the atomics to be implemented efficiently on compilers that do not
+//    directly implement the C++ memory model in their intrinsics, such as MSVC.
+//
+// 3. Allows for the implementation of atomics that may not be supported on the given platform,
+// such as 128-bit atomics on 32-bit platforms since the macros will only ever be expanded
+// on platforms that support said features. This makes implementing said features pretty easy
+// since we do not have to worry about complicated feature detection in the low level implementations.
+//
+// The macro implementation may assume that all passed in types are trivially constructible, thus it is
+// free to create local variables of the passed in types as it pleases.
+// It may also assume that all passed in types are trivially copyable.
+// It cannot assume a passed in type is any particular type; if a specific type is needed, it must do an
+// EASTL_ATOMIC_TYPE_PUN_CAST() to the required type.
+//
+
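+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Illustrative expansion (a sketch assuming a platform where only the compiler
+// backend implements the operation):
+//
+//     EASTL_ATOMIC_LOAD_RELAXED_32(uint32_t, ret, ptr)
+//         -> EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_32)(uint32_t, ret, ptr)
+//         -> EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32(uint32_t, ret, ptr)
+//
+// On a platform where the arch backend also implements the operation, the arch
+// macro is chosen instead.
+//
+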
+
+#include "compiler/compiler.h"
+#include "arch/arch.h"
+
+#include "atomic_macros/atomic_macros.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h
new file mode 100644
index 0000000..941ac51
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros.h
@@ -0,0 +1,145 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_ATOMIC_MACROS_H
+#define EASTL_ATOMIC_INTERNAL_ATOMIC_MACROS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include "atomic_macros_base.h"
+
+#include "atomic_macros_fetch_add.h"
+#include "atomic_macros_fetch_sub.h"
+
+#include "atomic_macros_fetch_and.h"
+#include "atomic_macros_fetch_xor.h"
+#include "atomic_macros_fetch_or.h"
+
+#include "atomic_macros_add_fetch.h"
+#include "atomic_macros_sub_fetch.h"
+
+#include "atomic_macros_and_fetch.h"
+#include "atomic_macros_xor_fetch.h"
+#include "atomic_macros_or_fetch.h"
+
+#include "atomic_macros_exchange.h"
+
+#include "atomic_macros_cmpxchg_weak.h"
+#include "atomic_macros_cmpxchg_strong.h"
+
+#include "atomic_macros_load.h"
+#include "atomic_macros_store.h"
+
+#include "atomic_macros_compiler_barrier.h"
+
+#include "atomic_macros_cpu_pause.h"
+
+#include "atomic_macros_memory_barrier.h"
+
+#include "atomic_macros_signal_fence.h"
+
+#include "atomic_macros_thread_fence.h"
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_8BIT) || defined(EASTL_ARCH_ATOMIC_HAS_8BIT)
+
+ #define EASTL_ATOMIC_HAS_8BIT
+
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_16BIT) || defined(EASTL_ARCH_ATOMIC_HAS_16BIT)
+
+ #define EASTL_ATOMIC_HAS_16BIT
+
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_32BIT) || defined(EASTL_ARCH_ATOMIC_HAS_32BIT)
+
+ #define EASTL_ATOMIC_HAS_32BIT
+
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_64BIT) || defined(EASTL_ARCH_ATOMIC_HAS_64BIT)
+
+ #define EASTL_ATOMIC_HAS_64BIT
+
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_HAS_128BIT) || defined(EASTL_ARCH_ATOMIC_HAS_128BIT)
+
+ #define EASTL_ATOMIC_HAS_128BIT
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_8)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_8 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_8
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_8 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8
+
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_16)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_16 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_16
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_16 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16
+
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_32)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_32 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_32
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_32 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32
+
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_64)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_64 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_64
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_64 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64
+
+#endif
+
+
+#if defined(EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_128)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 EASTL_ARCH_ATOMIC_FIXED_WIDTH_TYPE_128
+
+#elif defined(EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128)
+
+ #define EASTL_ATOMIC_FIXED_WIDTH_TYPE_128 EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_ATOMIC_MACROS_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h
new file mode 100644
index 0000000..f551a07
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_add_fetch.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_ADD_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_ADD_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_ADD_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_ADD_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_ADD_FETCH_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_ADD_FETCH_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h
new file mode 100644
index 0000000..6912722
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_and_fetch.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_AND_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_AND_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_AND_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_AND_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_AND_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_AND_FETCH_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_AND_FETCH_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h
new file mode 100644
index 0000000..f03720d
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_base.h
@@ -0,0 +1,65 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_BASE_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_BASE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_ATOMIC_INTERNAL_COMPILER_AVAILABLE(op) \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_COMPILER_, op), _AVAILABLE)
+
+#define EASTL_ATOMIC_INTERNAL_ARCH_AVAILABLE(op) \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ARCH_, op), _AVAILABLE)
+
+#define EASTL_ATOMIC_INTERNAL_NOT_IMPLEMENTED_ERROR(...) \
+ static_assert(false, "eastl::atomic<T> atomic macro not implemented!")
+
+
+/* Compiler && Arch Not Implemented */
+#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_00(op) \
+ EASTL_ATOMIC_INTERNAL_NOT_IMPLEMENTED_ERROR
+
+/* Arch Implemented */
+#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_01(op) \
+ EA_PREPROCESSOR_JOIN(EASTL_ARCH_, op)
+
+/* Compiler Implemented */
+#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_10(op) \
+ EA_PREPROCESSOR_JOIN(EASTL_COMPILER_, op)
+
+/* Compiler && Arch Implemented */
+#define EASTL_ATOMIC_INTERNAL_OP_PATTERN_11(op) \
+ EA_PREPROCESSOR_JOIN(EASTL_ARCH_, op)
+
+
+/* This macro selects one of the pattern macros above based on the 2x2 True-False truth table */
+#define EASTL_ATOMIC_INTERNAL_OP_HELPER1(compiler, arch, op) \
+ EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_INTERNAL_OP_PATTERN_, EA_PREPROCESSOR_JOIN(compiler, arch))(op)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// EASTL_ATOMIC_CHOOSE_OP_IMPL
+//
+// This macro chooses between the compiler or architecture implementation for a
+// given atomic operation.
+//
+// USAGE:
+//
+// EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_8)(type, ret, ptr, val)
+//
+#define EASTL_ATOMIC_CHOOSE_OP_IMPL(op) \
+ EASTL_ATOMIC_INTERNAL_OP_HELPER1( \
+ EASTL_ATOMIC_INTERNAL_COMPILER_AVAILABLE(op), \
+ EASTL_ATOMIC_INTERNAL_ARCH_AVAILABLE(op), \
+ op \
+ )
+
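+/////////////////////////////////////////////////////////////////////////////////
+//
+// Illustrative expansion (sketch only): if EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE
+// is defined to 1 by the compiler backend and EASTL_ARCH_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE
+// is defined to 0 by the arch backend, then
+//
+//     EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_8)
+//         -> EASTL_ATOMIC_INTERNAL_OP_PATTERN_10(ATOMIC_FETCH_ADD_RELAXED_8)
+//         -> EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8
+//
+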
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_BASE_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h
new file mode 100644
index 0000000..3cff493
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_strong.h
@@ -0,0 +1,245 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_STRONG_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_STRONG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128)(type, ret, ptr, expected, desired)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CMPXCHG_STRONG_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
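+// Usage sketch (editor's illustration, hedged; it is not part of the original header).
+// These internal macros are normally reached through eastl::atomic<T> rather than being
+// invoked directly. 'obj', 'expected' and 'exchanged' below are hypothetical caller
+// variables; on failure the observed value is presumably written back through
+// 'expected', as with a conventional compare-and-swap:
+//
+//     uint32_t obj = 0;       // the atomic object, assumed suitably aligned
+//     uint32_t expected = 0;  // the value we believe is currently stored
+//     bool exchanged;
+//     EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32(uint32_t, exchanged, &obj, &expected, 1u);
+//     // exchanged == true  -> obj held 0 and now holds 1
+//     // exchanged == false -> 'expected' holds the value actually observed in obj
+//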
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_8)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_16)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_32)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_64)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_RELEASE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_RELEASE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_ACQ_REL_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_STRONG_SEQ_CST_128)(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_STRONG_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h
new file mode 100644
index 0000000..60ea8b0
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cmpxchg_weak.h
@@ -0,0 +1,245 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_WEAK_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_WEAK_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
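+// Usage sketch (editor's illustration, hedged; it is not part of the original header).
+// The weak form is allowed to fail spuriously, so it is typically driven from a retry
+// loop. 'obj', 'expected' and 'exchanged' are hypothetical caller variables implementing
+// an increment via compare-and-swap:
+//
+//     uint32_t obj = 0;
+//     uint32_t expected = 0;
+//     bool exchanged = false;
+//     while (!exchanged)
+//     {
+//         EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(uint32_t, exchanged, &obj, &expected, expected + 1u);
+//     }
+//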
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128)(type, ret, ptr, expected, desired)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CMPXCHG_WEAK_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_8)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_8)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_16)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_16)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_32)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_32)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_64)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_64)(type, ret, ptr, expected, desired)
+
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELAXED_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQUIRE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_RELEASE_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_RELEASE_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_ACQ_REL_128)(type, ret, ptr, expected, desired)
+
+#define EASTL_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CMPXCHG_WEAK_SEQ_CST_128)(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_CMPXCHG_WEAK_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h
new file mode 100644
index 0000000..96ea6d0
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_compiler_barrier.h
@@ -0,0 +1,30 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_COMPILER_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_COMPILER_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_COMPILER_BARRIER()
+//
+#define EASTL_ATOMIC_COMPILER_BARRIER() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_COMPILER_BARRIER)()
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type)
+//
+#define EASTL_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, type) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY)(val, type)
+
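+// Usage sketch (editor's illustration, hedged; it is not part of the original header).
+// EASTL_ATOMIC_COMPILER_BARRIER() presumably constrains compiler reordering only and
+// emits no CPU fence; the DATA_DEPENDENCY form additionally names an object the
+// compiler must treat as live across the barrier. 'flag' is a hypothetical variable:
+//
+//     int flag = 0;
+//     EASTL_ATOMIC_COMPILER_BARRIER();                          // compiler-only fence
+//     EASTL_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(flag, int); // fence tied to 'flag'
+//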
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_COMPILER_BARRIER_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h
new file mode 100644
index 0000000..e027b57
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_cpu_pause.h
@@ -0,0 +1,22 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_CPU_PAUSE_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_CPU_PAUSE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CPU_PAUSE()
+//
+#define EASTL_ATOMIC_CPU_PAUSE() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_PAUSE)()
+
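+// Usage sketch (editor's illustration, hedged; it is not part of the original header).
+// EASTL_ATOMIC_CPU_PAUSE() is presumably a spin-wait hint (e.g. x86 'pause'); a typical
+// caller polls a word in a busy loop until another thread clears it. 'lockWord' and
+// 'locked' are hypothetical variables, and the load macro comes from the sibling
+// atomic_macros_load.h header:
+//
+//     uint32_t lockWord = 1; // released (set to 0) by another thread
+//     uint32_t locked;
+//     do
+//     {
+//         EASTL_ATOMIC_CPU_PAUSE(); // be polite to the core while spinning
+//         EASTL_ATOMIC_LOAD_ACQUIRE_32(uint32_t, locked, &lockWord);
+//     } while (locked != 0);
+//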
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_CPU_PAUSE_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h
new file mode 100644
index 0000000..0681318
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_exchange.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
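+// Usage sketch (editor's illustration, hedged; it is not part of the original header).
+// 'ret' presumably receives the value the object held before the exchange. 'slot' and
+// 'prev' are hypothetical caller variables:
+//
+//     uint32_t slot = 0;
+//     uint32_t prev;
+//     EASTL_ATOMIC_EXCHANGE_SEQ_CST_32(uint32_t, prev, &slot, 42u);
+//     // slot now holds 42; prev holds the previous contents (0 here)
+//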
+#define EASTL_ATOMIC_EXCHANGE_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_EXCHANGE_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_EXCHANGE_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_EXCHANGE_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_EXCHANGE_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h
new file mode 100644
index 0000000..701fdf3
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_add.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_ADD_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_ADD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val)
+//
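+// Usage sketch (editor's illustration, hedged; it is not part of the original header).
+// As the FETCH_ADD name suggests, 'ret' presumably receives the value held before the
+// addition; the sibling FETCH_SUB/AND/OR/XOR headers follow the same calling pattern.
+// 'counter' and 'prev' are hypothetical caller variables:
+//
+//     uint64_t counter = 10;
+//     uint64_t prev;
+//     EASTL_ATOMIC_FETCH_ADD_RELAXED_64(uint64_t, prev, &counter, 5u);
+//     // counter is now 15; prev is 10
+//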
+#define EASTL_ATOMIC_FETCH_ADD_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_ADD_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_ADD_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_ADD_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_ADD_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_ADD_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h
new file mode 100644
index 0000000..831f1bf
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_and.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_AND_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_AND_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_FETCH_AND_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_AND_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_AND_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_AND_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_AND_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_AND_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_AND_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h
new file mode 100644
index 0000000..b132297
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_or.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_OR_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_OR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_FETCH_OR_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_OR_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_OR_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_OR_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_OR_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_OR_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_OR_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h
new file mode 100644
index 0000000..0098064
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_sub.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_SUB_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_SUB_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_FETCH_SUB_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_SUB_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_SUB_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_SUB_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_SUB_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_SUB_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_SUB_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h
new file mode 100644
index 0000000..2887ea5
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_fetch_xor.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_FETCH_XOR_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_FETCH_XOR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_FETCH_XOR_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_XOR_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_XOR_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_FETCH_XOR_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_FETCH_XOR_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_FETCH_XOR_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_FETCH_XOR_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h
new file mode 100644
index 0000000..7658059
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_load.h
@@ -0,0 +1,75 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
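+// Usage sketch (editor's illustration, hedged; it is not part of the original header).
+// 'ret' receives the loaded value; the READ_DEPENDS variants further below presumably
+// model a consume-style, dependency-ordered load. 'value' and 'observed' are
+// hypothetical caller variables:
+//
+//     uint32_t value = 7;
+//     uint32_t observed;
+//     EASTL_ATOMIC_LOAD_ACQUIRE_32(uint32_t, observed, &value);
+//     // observed == 7, ordered before subsequent memory operations (acquire)
+//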
+#define EASTL_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_8)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_8)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_8)(type, ret, ptr)
+
+
+#define EASTL_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_16)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_16)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_16)(type, ret, ptr)
+
+
+#define EASTL_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_32)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_32)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_32)(type, ret, ptr)
+
+
+#define EASTL_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_64)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_64)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_64)(type, ret, ptr)
+
+
+#define EASTL_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_RELAXED_128)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_ACQUIRE_128)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_SEQ_CST_128)(type, ret, ptr)
+
+
+#define EASTL_ATOMIC_LOAD_READ_DEPENDS_32(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_READ_DEPENDS_32)(type, ret, ptr)
+
+#define EASTL_ATOMIC_LOAD_READ_DEPENDS_64(type, ret, ptr) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_LOAD_READ_DEPENDS_64)(type, ret, ptr)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_LOAD_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h
new file mode 100644
index 0000000..14f7be9
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_memory_barrier.h
@@ -0,0 +1,38 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_MEMORY_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_MEMORY_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CPU_MB()
+//
+#define EASTL_ATOMIC_CPU_MB() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_MB)()
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CPU_WMB()
+//
+#define EASTL_ATOMIC_CPU_WMB() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_WMB)()
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_CPU_RMB()
+//
+#define EASTL_ATOMIC_CPU_RMB() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_CPU_RMB)()
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_MEMORY_BARRIER_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h
new file mode 100644
index 0000000..c9ebd6e
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_or_fetch.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_OR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_OR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_OR_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_OR_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_OR_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_OR_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_OR_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_OR_FETCH_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_OR_FETCH_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h
new file mode 100644
index 0000000..dd16b10
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_signal_fence.h
@@ -0,0 +1,34 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_SIGNAL_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_SIGNAL_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_SIGNAL_FENCE_*()
+//
+#define EASTL_ATOMIC_SIGNAL_FENCE_RELAXED() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_RELAXED)()
+
+#define EASTL_ATOMIC_SIGNAL_FENCE_ACQUIRE() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_ACQUIRE)()
+
+#define EASTL_ATOMIC_SIGNAL_FENCE_RELEASE() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_RELEASE)()
+
+#define EASTL_ATOMIC_SIGNAL_FENCE_ACQ_REL() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_ACQ_REL)()
+
+#define EASTL_ATOMIC_SIGNAL_FENCE_SEQ_CST() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SIGNAL_FENCE_SEQ_CST)()
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_SIGNAL_FENCE_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h
new file mode 100644
index 0000000..64b662e
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_store.h
@@ -0,0 +1,68 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_STORE_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_STORE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_STORE_*_N(type, type * ptr, type val)
+//
+#define EASTL_ATOMIC_STORE_RELAXED_8(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_8)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_RELEASE_8(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_8)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_8)(type, ptr, val)
+
+
+#define EASTL_ATOMIC_STORE_RELAXED_16(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_16)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_RELEASE_16(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_16)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_16)(type, ptr, val)
+
+
+#define EASTL_ATOMIC_STORE_RELAXED_32(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_32)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_RELEASE_32(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_32)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_32)(type, ptr, val)
+
+
+#define EASTL_ATOMIC_STORE_RELAXED_64(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_64)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_RELEASE_64(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_64)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_64)(type, ptr, val)
+
+
+#define EASTL_ATOMIC_STORE_RELAXED_128(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELAXED_128)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_RELEASE_128(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_RELEASE_128)(type, ptr, val)
+
+#define EASTL_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_STORE_SEQ_CST_128)(type, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_STORE_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h
new file mode 100644
index 0000000..330f38e
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_sub_fetch.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_SUB_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_SUB_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_SUB_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_SUB_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_SUB_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_SUB_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_SUB_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_SUB_FETCH_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_SUB_FETCH_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h
new file mode 100644
index 0000000..26492c5
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_thread_fence.h
@@ -0,0 +1,34 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_THREAD_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_THREAD_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_THREAD_FENCE_*()
+//
+#define EASTL_ATOMIC_THREAD_FENCE_RELAXED() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_RELAXED)()
+
+#define EASTL_ATOMIC_THREAD_FENCE_ACQUIRE() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_ACQUIRE)()
+
+#define EASTL_ATOMIC_THREAD_FENCE_RELEASE() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_RELEASE)()
+
+#define EASTL_ATOMIC_THREAD_FENCE_ACQ_REL() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_ACQ_REL)()
+
+#define EASTL_ATOMIC_THREAD_FENCE_SEQ_CST() \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_THREAD_FENCE_SEQ_CST)()
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_THREAD_FENCE_H */
diff --git a/include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h
new file mode 100644
index 0000000..4227647
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_macros/atomic_macros_xor_fetch.h
@@ -0,0 +1,98 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MACROS_XOR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_MACROS_XOR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_ATOMIC_XOR_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_8)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_8)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_XOR_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_16)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_16)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_XOR_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_32)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_32)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_64)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_64)(type, ret, ptr, val)
+
+
+#define EASTL_ATOMIC_XOR_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELAXED_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQUIRE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_RELEASE_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_ACQ_REL_128)(type, ret, ptr, val)
+
+#define EASTL_ATOMIC_XOR_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_ATOMIC_CHOOSE_OP_IMPL(ATOMIC_XOR_FETCH_SEQ_CST_128)(type, ret, ptr, val)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MACROS_XOR_FETCH_H */
diff --git a/include/EASTL/internal/atomic/atomic_memory_order.h b/include/EASTL/internal/atomic/atomic_memory_order.h
new file mode 100644
index 0000000..b1c1403
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_memory_order.h
@@ -0,0 +1,44 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_MEMORY_ORDER_H
+#define EASTL_ATOMIC_INTERNAL_MEMORY_ORDER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+struct memory_order_relaxed_s {};
+struct memory_order_read_depends_s {};
+struct memory_order_acquire_s {};
+struct memory_order_release_s {};
+struct memory_order_acq_rel_s {};
+struct memory_order_seq_cst_s {};
+
+
+} // namespace internal
+
+
+EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_relaxed = internal::memory_order_relaxed_s{};
+EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_read_depends = internal::memory_order_read_depends_s{};
+EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_acquire = internal::memory_order_acquire_s{};
+EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_release = internal::memory_order_release_s{};
+EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_acq_rel = internal::memory_order_acq_rel_s{};
+EASTL_CPP17_INLINE_VARIABLE constexpr auto memory_order_seq_cst = internal::memory_order_seq_cst_s{};
+
+
+} // namespace eastl
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_MEMORY_ORDER_H */
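
[Editor's note] The memory orders above are distinct tag types rather than values of a runtime enum, so each order selects its own overload during compilation and an unsupported order simply fails to compile. A minimal, self-contained sketch of that tag-dispatch pattern follows; the names below are illustrative only, not EASTL's.

#include <cstdio>

// Stand-ins for the tag structs defined above.
struct relaxed_tag {};
struct acquire_tag {};

inline constexpr relaxed_tag order_relaxed{};
inline constexpr acquire_tag order_acquire{};

// Overload resolution picks the implementation at compile time; a real
// backend would emit a different instruction sequence per overload.
int load(const int* p, relaxed_tag) { return *p; }
int load(const int* p, acquire_tag) { return *p; /* plus an acquire barrier on weakly ordered CPUs */ }

int main()
{
    int value = 7;
    std::printf("%d %d\n", load(&value, order_relaxed), load(&value, order_acquire));
    return 0;
}
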
diff --git a/include/EASTL/internal/atomic/atomic_pointer.h b/include/EASTL/internal/atomic/atomic_pointer.h
new file mode 100644
index 0000000..18f6691
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_pointer.h
@@ -0,0 +1,281 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_POINTER_H
+#define EASTL_ATOMIC_INTERNAL_POINTER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include "atomic_push_compiler_options.h"
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+ template <typename T, unsigned width = sizeof(T)>
+ struct atomic_pointer_base;
+
+#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(funcName) \
+ template <typename Order> \
+ T* funcName(ptrdiff_t arg, Order order) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
+ } \
+ \
+ template <typename Order> \
+ T* funcName(ptrdiff_t arg, Order order) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ } \
+ \
+ T* funcName(ptrdiff_t arg) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ }
+
+#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(operatorOp) \
+ T* operator operatorOp() volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ } \
+ \
+ T* operator operatorOp(int) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ }
+
+#define EASTL_ATOMIC_POINTER_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(operatorOp) \
+ T* operator operatorOp(ptrdiff_t arg) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ }
+
+
+ template <typename T, unsigned width>
+ struct atomic_pointer_base<T*, width> : public atomic_base_width<T*, width>
+ {
+ private:
+
+ using Base = atomic_base_width<T*, width>;
+
+ public: /* ctors */
+
+ EA_CONSTEXPR atomic_pointer_base(T* desired) EA_NOEXCEPT
+ : Base{ desired }
+ {
+ }
+
+ EA_CONSTEXPR atomic_pointer_base() EA_NOEXCEPT = default;
+
+ atomic_pointer_base(const atomic_pointer_base&) EA_NOEXCEPT = delete;
+
+ public: /* assignment operators */
+
+ using Base::operator=;
+
+ atomic_pointer_base& operator=(const atomic_pointer_base&) EA_NOEXCEPT = delete;
+ atomic_pointer_base& operator=(const atomic_pointer_base&) volatile EA_NOEXCEPT = delete;
+
+ public: /* fetch_add */
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(fetch_add)
+
+ public: /* add_fetch */
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(add_fetch)
+
+ public: /* fetch_sub */
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(fetch_sub)
+
+ public: /* sub_fetch */
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_FUNCS_IMPL(sub_fetch)
+
+ public: /* operator++ && operator-- */
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(++)
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(--)
+
+ public: /* operator+= && operator-= */
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(+=)
+
+ EASTL_ATOMIC_POINTER_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(-=)
+
+ };
+
+
+ template <typename T, unsigned width = sizeof(T)>
+ struct atomic_pointer_width;
+
+#define EASTL_ATOMIC_POINTER_FUNC_IMPL(op, bits) \
+ T* retVal; \
+ { \
+ ptr_integral_type retType; \
+ ptr_integral_type addend = static_cast<ptr_integral_type>(arg) * static_cast<ptr_integral_type>(sizeof(T)); \
+ \
+ EA_PREPROCESSOR_JOIN(op, bits)(ptr_integral_type, retType, EASTL_ATOMIC_INTEGRAL_CAST(ptr_integral_type, this->GetAtomicAddress()), addend); \
+ \
+ retVal = reinterpret_cast<T*>(retType); \
+ } \
+ return retVal;
+
+#define EASTL_ATOMIC_POINTER_FETCH_IMPL(funcName, op, bits) \
+ T* funcName(ptrdiff_t arg) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_TYPE_IS_OBJECT(T); \
+ EASTL_ATOMIC_POINTER_FUNC_IMPL(op, bits); \
+ }
+
+#define EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, orderType, op, bits) \
+ T* funcName(ptrdiff_t arg, orderType) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_TYPE_IS_OBJECT(T); \
+ EASTL_ATOMIC_POINTER_FUNC_IMPL(op, bits); \
+ }
+
+#define EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, Order) \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, fetchOp), Order)
+
+#define EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(funcName, fetchOp, bits) \
+ using Base::funcName; \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_IMPL(funcName, EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_relaxed_s, \
+ EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _RELAXED_), bits) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acquire_s, \
+ EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _ACQUIRE_), bits) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_release_s, \
+ EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _RELEASE_), bits) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acq_rel_s, \
+ EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _ACQ_REL_), bits) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_seq_cst_s, \
+ EASTL_ATOMIC_POINTER_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits)
+
+#define EASTL_ATOMIC_POINTER_FETCH_INC_DEC_OPERATOR_IMPL(operatorOp, preFuncName, postFuncName) \
+ using Base::operator operatorOp; \
+ \
+ T* operator operatorOp() EA_NOEXCEPT \
+ { \
+ return preFuncName(1, eastl::memory_order_seq_cst); \
+ } \
+ \
+ T* operator operatorOp(int) EA_NOEXCEPT \
+ { \
+ return postFuncName(1, eastl::memory_order_seq_cst); \
+ }
+
+#define EASTL_ATOMIC_POINTER_FETCH_ASSIGNMENT_OPERATOR_IMPL(operatorOp, funcName) \
+ using Base::operator operatorOp; \
+ \
+ T* operator operatorOp(ptrdiff_t arg) EA_NOEXCEPT \
+ { \
+ return funcName(arg, eastl::memory_order_seq_cst); \
+ }
+
+
+#define EASTL_ATOMIC_POINTER_WIDTH_SPECIALIZE(bytes, bits) \
+ template <typename T> \
+ struct atomic_pointer_width<T*, bytes> : public atomic_pointer_base<T*, bytes> \
+ { \
+ private: \
+ \
+ using Base = atomic_pointer_base<T*, bytes>; \
+ using u_ptr_integral_type = EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(uint, bits), _t); \
+ using ptr_integral_type = EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(int, bits), _t); \
+ \
+ public: /* ctors */ \
+ \
+ EA_CONSTEXPR atomic_pointer_width(T* desired) EA_NOEXCEPT \
+ : Base{ desired } \
+ { \
+ } \
+ \
+ EA_CONSTEXPR atomic_pointer_width() EA_NOEXCEPT = default; \
+ \
+ atomic_pointer_width(const atomic_pointer_width&) EA_NOEXCEPT = delete; \
+ \
+ public: /* assignment operators */ \
+ \
+ using Base::operator=; \
+ \
+ atomic_pointer_width& operator=(const atomic_pointer_width&) EA_NOEXCEPT = delete; \
+ atomic_pointer_width& operator=(const atomic_pointer_width&) volatile EA_NOEXCEPT = delete; \
+ \
+ public: /* fetch_add */ \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(fetch_add, FETCH_ADD, bits) \
+ \
+ public: /* add_fetch */ \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(add_fetch, ADD_FETCH, bits) \
+ \
+ public: /* fetch_sub */ \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(fetch_sub, FETCH_SUB, bits) \
+ \
+ public: /* sub_fetch */ \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_FUNCS_IMPL(sub_fetch, SUB_FETCH, bits) \
+ \
+ public: /* operator++ && operator-- */ \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_INC_DEC_OPERATOR_IMPL(++, add_fetch, fetch_add) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_INC_DEC_OPERATOR_IMPL(--, sub_fetch, fetch_sub) \
+ \
+ public: /* operator+= && operator-= */ \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ASSIGNMENT_OPERATOR_IMPL(+=, add_fetch) \
+ \
+ EASTL_ATOMIC_POINTER_FETCH_ASSIGNMENT_OPERATOR_IMPL(-=, sub_fetch) \
+ \
+ public: \
+ \
+ using Base::load; \
+ \
+ T* load(eastl::internal::memory_order_read_depends_s) EA_NOEXCEPT \
+ { \
+ T* retPointer; \
+ EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_LOAD_READ_DEPENDS_, bits)(T*, retPointer, this->GetAtomicAddress()); \
+ return retPointer; \
+ } \
+ };
+
+
+#if defined(EASTL_ATOMIC_HAS_32BIT) && EA_PLATFORM_PTR_SIZE == 4
+ EASTL_ATOMIC_POINTER_WIDTH_SPECIALIZE(4, 32)
+#endif
+
+#if defined(EASTL_ATOMIC_HAS_64BIT) && EA_PLATFORM_PTR_SIZE == 8
+ EASTL_ATOMIC_POINTER_WIDTH_SPECIALIZE(8, 64)
+#endif
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+#include "atomic_pop_compiler_options.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_POINTER_H */
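
[Editor's note] EASTL_ATOMIC_POINTER_FUNC_IMPL above scales the ptrdiff_t argument by sizeof(T) before performing the integral fetch-add, so pointer atomics advance element-wise just like ordinary pointer arithmetic. A small std::atomic analogue, used here only to illustrate that scaling:

#include <atomic>
#include <cassert>
#include <cstdint>

int main()
{
    int buffer[4] = {0, 1, 2, 3};
    std::atomic<int*> p{buffer};

    // fetch_add on an atomic pointer is element-wise: advancing by 2 moves
    // the pointer 2 * sizeof(int) bytes, which is exactly what the
    // "arg * sizeof(T)" scaling above does on the integral representation.
    int* old = p.fetch_add(2);
    assert(old == buffer);
    assert(p.load() == buffer + 2);
    assert(reinterpret_cast<std::uintptr_t>(p.load()) -
           reinterpret_cast<std::uintptr_t>(buffer) == 2 * sizeof(int));
    return 0;
}
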
diff --git a/include/EASTL/internal/atomic/atomic_pop_compiler_options.h b/include/EASTL/internal/atomic/atomic_pop_compiler_options.h
new file mode 100644
index 0000000..92f241a
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_pop_compiler_options.h
@@ -0,0 +1,11 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+/* NOTE: No Header Guard */
+
+
+EA_RESTORE_VC_WARNING();
+
+EA_RESTORE_CLANG_WARNING();
diff --git a/include/EASTL/internal/atomic/atomic_push_compiler_options.h b/include/EASTL/internal/atomic/atomic_push_compiler_options.h
new file mode 100644
index 0000000..c5a5471
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_push_compiler_options.h
@@ -0,0 +1,17 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+/* NOTE: No Header Guard */
+
+
+// 'class' : multiple assignment operators specified
+EA_DISABLE_VC_WARNING(4522);
+
+// misaligned atomic operation may incur significant performance penalty
+// Earlier versions of clang emit the above warning incorrectly.
+// All eastl::atomic<T> objects are size aligned,
+// which is both statically and runtime asserted,
+// so we disable this warning.
+EA_DISABLE_CLANG_WARNING(-Watomic-alignment);
diff --git a/include/EASTL/internal/atomic/atomic_size_aligned.h b/include/EASTL/internal/atomic/atomic_size_aligned.h
new file mode 100644
index 0000000..db23e47
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_size_aligned.h
@@ -0,0 +1,197 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_SIZE_ALIGNED_H
+#define EASTL_ATOMIC_INTERNAL_SIZE_ALIGNED_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#include "atomic_push_compiler_options.h"
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+#define EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_IMPL(funcName) \
+ template <typename OrderSuccess, typename OrderFailure> \
+ bool funcName(T& expected, T desired, \
+ OrderSuccess orderSuccess, \
+ OrderFailure orderFailure) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
+ return false; \
+ } \
+ \
+ template <typename OrderSuccess, typename OrderFailure> \
+ bool funcName(T& expected, T desired, \
+ OrderSuccess orderSuccess, \
+ OrderFailure orderFailure) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ return false; \
+ } \
+ \
+ template <typename Order> \
+ bool funcName(T& expected, T desired, \
+ Order order) EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
+ return false; \
+ } \
+ \
+ template <typename Order> \
+ bool funcName(T& expected, T desired, \
+ Order order) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ return false; \
+ } \
+ \
+ bool funcName(T& expected, T desired) volatile EA_NOEXCEPT \
+ { \
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
+ return false; \
+ }
+
+#define EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_WEAK_IMPL() \
+ EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_IMPL(compare_exchange_weak)
+
+#define EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_STRONG_IMPL() \
+ EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_IMPL(compare_exchange_strong)
+
+
+ template<typename T>
+ struct atomic_size_aligned
+ {
+ public: /* ctors */
+
+ EA_CONSTEXPR atomic_size_aligned(T desired) EA_NOEXCEPT
+ : mAtomic{ desired }
+ {
+ }
+
+ EA_CONSTEXPR atomic_size_aligned() EA_NOEXCEPT_IF(eastl::is_nothrow_default_constructible_v<T>)
+		: mAtomic{} /* Value-Initialize which will Zero-Initialize Trivially Constructible types */
+ {
+ }
+
+ atomic_size_aligned(const atomic_size_aligned&) EA_NOEXCEPT = delete;
+
+ public: /* store */
+
+ template <typename Order>
+ void store(T desired, Order order) EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T);
+ }
+
+ template <typename Order>
+ void store(T desired, Order order) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ void store(T desired) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ public: /* load */
+
+ template <typename Order>
+ T load(Order order) const EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T);
+ }
+
+ template <typename Order>
+ T load(Order order) const volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ T load() const volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ public: /* exchange */
+
+ template <typename Order>
+ T exchange(T desired, Order order) EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T);
+ }
+
+ template <typename Order>
+ T exchange(T desired, Order order) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ T exchange(T desired) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ public: /* compare_exchange_weak */
+
+ EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_WEAK_IMPL()
+
+ public: /* compare_exchange_strong */
+
+ EASTL_ATOMIC_SIZE_ALIGNED_STATIC_ASSERT_CMPXCHG_STRONG_IMPL()
+
+ public: /* assignment operator */
+
+ T operator=(T desired) volatile EA_NOEXCEPT
+ {
+ EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T);
+ }
+
+ atomic_size_aligned& operator=(const atomic_size_aligned&) EA_NOEXCEPT = delete;
+ atomic_size_aligned& operator=(const atomic_size_aligned&) volatile EA_NOEXCEPT = delete;
+
+ protected: /* Accessors */
+
+ T* GetAtomicAddress() const EA_NOEXCEPT
+ {
+ return eastl::addressof(mAtomic);
+ }
+
+ private:
+
+ /**
+ * Some compilers such as MSVC will align 64-bit values on 32-bit machines on
+ * 4-byte boundaries which can ruin the atomicity guarantees.
+ *
+ * Ensure everything is size aligned.
+ *
+	 * mutable is needed because some loads are only atomic when implemented via a
+	 * compare-exchange (128-bit atomics, for example), so even a const load path
+	 * requires write access to the underlying variable.
+ */
+ EA_ALIGN(sizeof(T)) mutable T mAtomic;
+ };
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+#include "atomic_pop_compiler_options.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_SIZE_ALIGNED_H */
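
[Editor's note] The comment above is the key detail of this class: the member is over-aligned to sizeof(T), so that, for example, a 64-bit value on a 32-bit MSVC target cannot end up on a 4-byte boundary. A minimal sketch of the same idea using standard alignas; EA_ALIGN and mAtomic are EASTL's, everything else here is illustrative:

#include <cstdint>
#include <cstdio>

// Mirrors the EA_ALIGN(sizeof(T)) member in atomic_size_aligned: the storage
// is aligned to its own size, so a 64-bit value never straddles an 8-byte
// boundary even on ABIs that would otherwise use 4-byte alignment for it.
template <typename T>
struct size_aligned
{
    alignas(sizeof(T)) mutable T value;
};

int main()
{
    static_assert(alignof(size_aligned<std::uint64_t>) == sizeof(std::uint64_t),
                  "storage must be size aligned");

    size_aligned<std::uint64_t> s{0};
    std::printf("alignment: %zu\n", alignof(decltype(s)));
    return 0;
}
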
diff --git a/include/EASTL/internal/atomic/atomic_standalone.h b/include/EASTL/internal/atomic/atomic_standalone.h
new file mode 100644
index 0000000..011d5fb
--- /dev/null
+++ b/include/EASTL/internal/atomic/atomic_standalone.h
@@ -0,0 +1,470 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_STANDALONE_H
+#define EASTL_ATOMIC_INTERNAL_STANDALONE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+namespace eastl
+{
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// bool atomic_compare_exchange_strong(eastl::atomic<T>*, T* expected, T desired)
+//
+template <typename T>
+EASTL_FORCE_INLINE bool atomic_compare_exchange_strong(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type* expected,
+ typename eastl::atomic<T>::value_type desired) EA_NOEXCEPT
+{
+ return atomicObj->compare_exchange_strong(*expected, desired);
+}
+
+template <typename T, typename OrderSuccess, typename OrderFailure>
+EASTL_FORCE_INLINE bool atomic_compare_exchange_strong_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type* expected,
+ typename eastl::atomic<T>::value_type desired,
+ OrderSuccess orderSuccess, OrderFailure orderFailure) EA_NOEXCEPT
+{
+ return atomicObj->compare_exchange_strong(*expected, desired, orderSuccess, orderFailure);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// bool atomic_compare_exchange_weak(eastl::atomic<T>*, T* expected, T desired)
+//
+template <typename T>
+EASTL_FORCE_INLINE bool atomic_compare_exchange_weak(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type* expected,
+ typename eastl::atomic<T>::value_type desired) EA_NOEXCEPT
+{
+ return atomicObj->compare_exchange_weak(*expected, desired);
+}
+
+template <typename T, typename OrderSuccess, typename OrderFailure>
+EASTL_FORCE_INLINE bool atomic_compare_exchange_weak_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type* expected,
+ typename eastl::atomic<T>::value_type desired,
+ OrderSuccess orderSuccess, OrderFailure orderFailure) EA_NOEXCEPT
+{
+ return atomicObj->compare_exchange_weak(*expected, desired, orderSuccess, orderFailure);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_xor(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_xor(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+ return atomicObj->fetch_xor(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_xor_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->fetch_xor(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_xor_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_xor_fetch(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+ return atomicObj->xor_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_xor_fetch_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->xor_fetch(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_or(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_or(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+ return atomicObj->fetch_or(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_or_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->fetch_or(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_or_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_or_fetch(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+ return atomicObj->or_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_or_fetch_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->or_fetch(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_and(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_and(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+ return atomicObj->fetch_and(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_and_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->fetch_and(arg, order);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_and_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_and_fetch(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg) EA_NOEXCEPT
+{
+ return atomicObj->and_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_and_fetch_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->and_fetch(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_sub(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_sub(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg) EA_NOEXCEPT
+{
+ return atomicObj->fetch_sub(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_sub_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->fetch_sub(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_sub_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_sub_fetch(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg) EA_NOEXCEPT
+{
+ return atomicObj->sub_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_sub_fetch_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->sub_fetch(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_fetch_add(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_add(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg) EA_NOEXCEPT
+{
+ return atomicObj->fetch_add(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_fetch_add_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->fetch_add(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_add_fetch(eastl::atomic<T>*, T arg)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_add_fetch(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg) EA_NOEXCEPT
+{
+ return atomicObj->add_fetch(arg);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_add_fetch_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::difference_type arg,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->add_fetch(arg, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_exchange(eastl::atomic<T>*, T desired)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_exchange(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type desired) EA_NOEXCEPT
+{
+ return atomicObj->exchange(desired);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_exchange_explicit(eastl::atomic<T>* atomicObj,
+ typename eastl::atomic<T>::value_type desired,
+ Order order) EA_NOEXCEPT
+{
+ return atomicObj->exchange(desired, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_load(const eastl::atomic<T>*)
+//
+template <typename T>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_load(const eastl::atomic<T>* atomicObj) EA_NOEXCEPT
+{
+ return atomicObj->load();
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_load_explicit(const eastl::atomic<T>* atomicObj, Order order) EA_NOEXCEPT
+{
+ return atomicObj->load(order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// T atomic_load_cond(const eastl::atomic<T>*)
+//
+template <typename T, typename Predicate>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_load_cond(const eastl::atomic<T>* atomicObj, Predicate pred) EA_NOEXCEPT
+{
+ for (;;)
+ {
+ typename eastl::atomic<T>::value_type ret = atomicObj->load();
+
+ if (pred(ret))
+ {
+ return ret;
+ }
+
+ EASTL_ATOMIC_CPU_PAUSE();
+ }
+}
+
+template <typename T, typename Predicate, typename Order>
+EASTL_FORCE_INLINE typename eastl::atomic<T>::value_type atomic_load_cond_explicit(const eastl::atomic<T>* atomicObj, Predicate pred, Order order) EA_NOEXCEPT
+{
+ for (;;)
+ {
+ typename eastl::atomic<T>::value_type ret = atomicObj->load(order);
+
+ if (pred(ret))
+ {
+ return ret;
+ }
+
+ EASTL_ATOMIC_CPU_PAUSE();
+ }
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void atomic_store(eastl::atomic<T>*, T)
+//
+template <typename T>
+EASTL_FORCE_INLINE void atomic_store(eastl::atomic<T>* atomicObj, typename eastl::atomic<T>::value_type desired) EA_NOEXCEPT
+{
+ atomicObj->store(desired);
+}
+
+template <typename T, typename Order>
+EASTL_FORCE_INLINE void atomic_store_explicit(eastl::atomic<T>* atomicObj, typename eastl::atomic<T>::value_type desired, Order order) EA_NOEXCEPT
+{
+ atomicObj->store(desired, order);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::atomic_thread_fence(Order)
+//
+template <typename Order>
+EASTL_FORCE_INLINE void atomic_thread_fence(Order) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_THREAD_FENCE_RELAXED();
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_acquire_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_THREAD_FENCE_ACQUIRE();
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_release_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_THREAD_FENCE_RELEASE();
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_THREAD_FENCE_ACQ_REL();
+}
+
+EASTL_FORCE_INLINE void atomic_thread_fence(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_THREAD_FENCE_SEQ_CST();
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::atomic_signal_fence(Order)
+//
+template <typename Order>
+EASTL_FORCE_INLINE void atomic_signal_fence(Order) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(Order);
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_relaxed_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_SIGNAL_FENCE_RELAXED();
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_acquire_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_SIGNAL_FENCE_ACQUIRE();
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_release_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_SIGNAL_FENCE_RELEASE();
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_acq_rel_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_SIGNAL_FENCE_ACQ_REL();
+}
+
+EASTL_FORCE_INLINE void atomic_signal_fence(eastl::internal::memory_order_seq_cst_s) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_SIGNAL_FENCE_SEQ_CST();
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::compiler_barrier()
+//
+EASTL_FORCE_INLINE void compiler_barrier() EA_NOEXCEPT
+{
+ EASTL_ATOMIC_COMPILER_BARRIER();
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::compiler_barrier_data_dependency(const T&)
+//
+template <typename T>
+EASTL_FORCE_INLINE void compiler_barrier_data_dependency(const T& val) EA_NOEXCEPT
+{
+ EASTL_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, T);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void eastl::cpu_pause()
+//
+EASTL_FORCE_INLINE void cpu_pause() EA_NOEXCEPT
+{
+ EASTL_ATOMIC_CPU_PAUSE();
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// bool eastl::atomic_is_lock_free(eastl::atomic<T>*)
+//
+template <typename T>
+EASTL_FORCE_INLINE bool atomic_is_lock_free(const eastl::atomic<T>* atomicObj) EA_NOEXCEPT
+{
+ return atomicObj->is_lock_free();
+}
+
+
+} // namespace eastl
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_STANDALONE_H */
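
[Editor's note] atomic_load_cond above is a polling loop: load, test the caller's predicate, and issue EASTL_ATOMIC_CPU_PAUSE between failed polls. A hedged std::atomic sketch of the same wait pattern, with std::this_thread::yield() standing in for the CPU pause:

#include <atomic>
#include <thread>
#include <cstdio>

// Same shape as eastl::atomic_load_cond: poll the atomic, test the
// predicate, and back off between failed polls.
template <typename T, typename Predicate>
T load_when(const std::atomic<T>& a, Predicate pred)
{
    for (;;)
    {
        T v = a.load();
        if (pred(v))
            return v;
        std::this_thread::yield();
    }
}

int main()
{
    std::atomic<int> flag{0};
    std::thread producer([&] { flag.store(5); });

    int seen = load_when(flag, [](int v) { return v != 0; });
    std::printf("observed %d\n", seen);

    producer.join();
    return 0;
}
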
diff --git a/include/EASTL/internal/atomic/compiler/compiler.h b/include/EASTL/internal/atomic/compiler/compiler.h
new file mode 100644
index 0000000..65a4cd0
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler.h
@@ -0,0 +1,120 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Include the compiler specific implementations
+//
+#if defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)
+
+ #include "gcc/compiler_gcc.h"
+
+#elif defined(EA_COMPILER_MSVC)
+
+ #include "msvc/compiler_msvc.h"
+
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+namespace eastl
+{
+
+
+namespace internal
+{
+
+
+/**
+ * NOTE:
+ *
+ * This can be used by specific compiler implementations to implement a data dependency compiler barrier.
+ * Some compiler barriers cannot take an input dependency the way the gcc asm syntax can.
+ * Thus we need a way to create a false dependency on the input variable so the compiler
+ * does not eliminate it as a dead store.
+ * A volatile function pointer ensures the compiler must always load the function pointer and call thru it
+ * since the compiler cannot reason about any side effects. Thus the compiler must always assume the
+ * input variable may be accessed and thus cannot be dead-stored. This technique works even in the presence
+ * of Link-Time Optimization. A compiler barrier with a data dependency is useful in these situations.
+ *
+ * void foo()
+ * {
+ * eastl::vector<int> v;
+ * while (Benchmark.ContinueRunning())
+ * {
+ * v.push_back(0);
+ * eastl::compiler_barrier(); OR eastl::compiler_barrier_data_dependency(v);
+ * }
+ * }
+ *
+ * We are trying to benchmark the push_back function of a vector. The vector v has only local scope.
+ * The compiler is well within its rights to remove all accesses to v even with the compiler barrier
+ * because there are no observable uses of the vector v.
+ * The compiler barrier data dependency ensures there is an input dependency on the variable so that
+ * it isn't removed. This is also useful when writing test code that the compiler may remove.
+ */
+
+typedef void (*CompilerBarrierDataDependencyFuncPtr)(void*);
+
+extern EASTL_API volatile CompilerBarrierDataDependencyFuncPtr gCompilerBarrierDataDependencyFunc;
+
+
+#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_FUNC(ptr) \
+ eastl::internal::gCompilerBarrierDataDependencyFunc(ptr)
+
+
+} // namespace internal
+
+
+} // namespace eastl
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "compiler_fetch_add.h"
+#include "compiler_fetch_sub.h"
+
+#include "compiler_fetch_and.h"
+#include "compiler_fetch_xor.h"
+#include "compiler_fetch_or.h"
+
+#include "compiler_add_fetch.h"
+#include "compiler_sub_fetch.h"
+
+#include "compiler_and_fetch.h"
+#include "compiler_xor_fetch.h"
+#include "compiler_or_fetch.h"
+
+#include "compiler_exchange.h"
+
+#include "compiler_cmpxchg_weak.h"
+#include "compiler_cmpxchg_strong.h"
+
+#include "compiler_load.h"
+#include "compiler_store.h"
+
+#include "compiler_barrier.h"
+
+#include "compiler_cpu_pause.h"
+
+#include "compiler_memory_barrier.h"
+
+#include "compiler_signal_fence.h"
+
+#include "compiler_thread_fence.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_H */
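
[Editor's note] The NOTE above describes the volatile-function-pointer trick behind compiler_barrier_data_dependency: the compiler cannot prove the indirect call is side-effect free, so whatever is passed through it cannot be dead-store eliminated. A self-contained sketch of the same trick, independent of EASTL's gCompilerBarrierDataDependencyFunc:

#include <cstdio>

// The compiler must load the volatile function pointer and call through it
// on every use; since it cannot see the callee's side effects, the object
// whose address is passed in stays live.
static void barrier_sink(void*) {}

static void (*volatile gBarrierSink)(void*) = &barrier_sink;

int main()
{
    int counter = 0;
    for (int i = 0; i < 1000; ++i)
    {
        ++counter;
        // Keeps 'counter' live each iteration, mirroring what
        // eastl::compiler_barrier_data_dependency(counter) achieves.
        gBarrierSink(&counter);
    }
    std::printf("%d\n", counter);
    return 0;
}
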
diff --git a/include/EASTL/internal/atomic/compiler/compiler_add_fetch.h b/include/EASTL/internal/atomic/compiler/compiler_add_fetch.h
new file mode 100644
index 0000000..763921c
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_add_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_ADD_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_ADD_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_ADD_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_and_fetch.h b/include/EASTL/internal/atomic/compiler/compiler_and_fetch.h
new file mode 100644
index 0000000..7b1e0a4
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_and_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_AND_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_AND_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_AND_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_barrier.h b/include/EASTL/internal/atomic/compiler/compiler_barrier.h
new file mode 100644
index 0000000..550070e
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_barrier.h
@@ -0,0 +1,36 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER()
+//
+#if defined(EASTL_COMPILER_ATOMIC_COMPILER_BARRIER)
+ #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type)
+//
+#if defined(EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY)
+ #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_BARRIER_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h b/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h
new file mode 100644
index 0000000..2ee2971
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_strong.h
@@ -0,0 +1,430 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_STRONG_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_STRONG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
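+// The single memory order variants below map the failure ordering of a failed
+// compare-exchange as follows: RELAXED and RELEASE use a RELAXED failure order,
+// ACQUIRE and ACQ_REL use ACQUIRE, and SEQ_CST keeps SEQ_CST. This mirrors the rule
+// that the failure ordering of a compare-exchange may not perform a release operation.
+//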
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_STRONG_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h b/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h
new file mode 100644
index 0000000..9bc1a62
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_cmpxchg_weak.h
@@ -0,0 +1,430 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_WEAK_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_WEAK_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
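+// The failure order mapping here matches the cmpxchg strong variants: RELAXED and
+// RELEASE map the failure order to RELAXED, ACQUIRE and ACQ_REL map it to ACQUIRE,
+// and SEQ_CST keeps SEQ_CST.
+//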
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128_AVAILABLE \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128_AVAILABLE
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_CMPXCHG_WEAK_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h b/include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h
new file mode 100644
index 0000000..073b3fb
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_cpu_pause.h
@@ -0,0 +1,32 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_CPU_PAUSE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_CPU_PAUSE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_PAUSE()
+//
+#if defined(EASTL_COMPILER_ATOMIC_CPU_PAUSE)
+
+ #define EASTL_COMPILER_ATOMIC_CPU_PAUSE_AVAILABLE 1
+
+#else
+
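+	// Fall back to a no-op so spin-wait loops still compile on compilers that provide
+	// no dedicated pause/yield intrinsic; the operation is reported as available either way.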
+ #define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \
+ ((void)0)
+
+ #define EASTL_COMPILER_ATOMIC_CPU_PAUSE_AVAILABLE 1
+
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_CPU_PAUSE_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_exchange.h b/include/EASTL/internal/atomic/compiler/compiler_exchange.h
new file mode 100644
index 0000000..d82b199
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_exchange.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_EXCHANGE_H */
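
Note: these *_AVAILABLE flags only record which exchange variants the selected compiler backend defines; the backend headers provide the actual bodies. As a rough sketch of the calling convention documented in the comment at the top of this file (the macro is a statement and returns the previous value through `ret`, not as an expression), a GCC/Clang-style backend could plausibly define one variant as follows — a hypothetical illustration, not the actual EASTL definition:

    // Hypothetical backend definition following the documented macro shape:
    // writes the old value at *ptr into 'ret' and stores 'val' atomically.
    #define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \
        ret = __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST)
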
diff --git a/include/EASTL/internal/atomic/compiler/compiler_fetch_add.h b/include/EASTL/internal/atomic/compiler/compiler_fetch_add.h
new file mode 100644
index 0000000..e6c4238
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_fetch_add.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_ADD_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_ADD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_ADD_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_fetch_and.h b/include/EASTL/internal/atomic/compiler/compiler_fetch_and.h
new file mode 100644
index 0000000..b0976fc
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_fetch_and.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_AND_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_AND_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_AND_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_fetch_or.h b/include/EASTL/internal/atomic/compiler/compiler_fetch_or.h
new file mode 100644
index 0000000..2e6cfda
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_fetch_or.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_OR_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_OR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_OR_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h b/include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h
new file mode 100644
index 0000000..d7ed86c
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_fetch_sub.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_SUB_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_SUB_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_SUB_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h b/include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h
new file mode 100644
index 0000000..10cf7d9
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_fetch_xor.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_XOR_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_XOR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_FETCH_XOR_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_load.h b/include/EASTL/internal/atomic/compiler/compiler_load.h
new file mode 100644
index 0000000..734dbb8
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_load.h
@@ -0,0 +1,139 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+/**
+ * NOTE:
+ *
+ * These are used for data-dependent reads through a pointer. It is safe
+ * to assume that pointer-sized reads are atomic on any given platform.
+ * Unlike on the DEC Alpha, this implementation assumes the hardware
+ * does not reorder dependent loads.
+ */
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_N(type, ret, ptr) \
+ { \
+ static_assert(eastl::is_pointer_v<type>, "eastl::atomic<T> : Read Depends Type must be a Pointer Type!"); \
+ static_assert(eastl::is_pointer_v<eastl::remove_pointer_t<decltype(ptr)>>, "eastl::atomic<T> : Read Depends Ptr must be a Pointer to a Pointer!"); \
+ \
+ ret = (*EASTL_ATOMIC_VOLATILE_CAST(ptr)); \
+ }
+
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_32(type, ret, ptr) \
+ EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_N(type, ret, ptr)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_64(type, ret, ptr) \
+ EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_N(type, ret, ptr)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_32_AVAILABLE 1
+#define EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_64_AVAILABLE 1
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_LOAD_H */
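
Note: the READ_DEPENDS macros above cover the consume-style case of atomically loading a pointer and then dereferencing it, relying on the volatile cast plus the hardware's dependent-load ordering. A usage sketch under stated assumptions (the `Node` type and `gSharedNode` variable are hypothetical; the macro requires a pointer-to-pointer argument, hence `&gSharedNode`):

    // Illustrative only: load a published Node* with read-depends semantics,
    // then perform a dependent read through it.
    struct Node { int value; };
    Node* gSharedNode;

    int ReadPublishedValue()
    {
        Node* local;
        EASTL_COMPILER_ATOMIC_LOAD_READ_DEPENDS_64(Node*, local, &gSharedNode);
        return local->value; // dependent load, ordered after the pointer read
    }
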
diff --git a/include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h b/include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h
new file mode 100644
index 0000000..ac3923c
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_memory_barrier.h
@@ -0,0 +1,47 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MEMORY_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MEMORY_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_MB()
+//
+#if defined(EASTL_COMPILER_ATOMIC_CPU_MB)
+ #define EASTL_COMPILER_ATOMIC_CPU_MB_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CPU_MB_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_WMB()
+//
+#if defined(EASTL_COMPILER_ATOMIC_CPU_WMB)
+ #define EASTL_COMPILER_ATOMIC_CPU_WMB_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CPU_WMB_AVAILABLE 0
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_RMB()
+//
+#if defined(EASTL_COMPILER_ATOMIC_CPU_RMB)
+ #define EASTL_COMPILER_ATOMIC_CPU_RMB_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_CPU_RMB_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MEMORY_BARRIER_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_or_fetch.h b/include/EASTL/internal/atomic/compiler/compiler_or_fetch.h
new file mode 100644
index 0000000..a26a72c
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_or_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_OR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_OR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_OR_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_signal_fence.h b/include/EASTL/internal/atomic/compiler/compiler_signal_fence.h
new file mode 100644
index 0000000..25b0b74
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_signal_fence.h
@@ -0,0 +1,49 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_SIGNAL_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_SIGNAL_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_*()
+//
+#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED)
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE)
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE)
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL)
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST)
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_SIGNAL_FENCE_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_store.h b/include/EASTL/internal/atomic/compiler/compiler_store.h
new file mode 100644
index 0000000..1a553e2
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_store.h
@@ -0,0 +1,113 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_STORE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_STORE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_STORE_*_N(type, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_STORE_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h b/include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h
new file mode 100644
index 0000000..4b7eea9
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_sub_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_SUB_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_SUB_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_SUB_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_thread_fence.h b/include/EASTL/internal/atomic/compiler/compiler_thread_fence.h
new file mode 100644
index 0000000..01d8f0f
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_thread_fence.h
@@ -0,0 +1,49 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_THREAD_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_THREAD_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_THREAD_FENCE_*()
+//
+#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED)
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE)
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE)
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL)
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST)
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_THREAD_FENCE_H */
diff --git a/include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h b/include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h
new file mode 100644
index 0000000..05680bd
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/compiler_xor_fetch.h
@@ -0,0 +1,173 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_XOR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_XOR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64_AVAILABLE 0
+#endif
+
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128_AVAILABLE 0
+#endif
+
+#if defined(EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128)
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 1
+#else
+ #define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128_AVAILABLE 0
+#endif
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_XOR_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h
new file mode 100644
index 0000000..26a99c2
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc.h
@@ -0,0 +1,154 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/**
+ * NOTE:
+ *
+ * The gcc __atomic builtins may defer to function calls into libatomic.so on architectures that do not
+ * support atomic instructions of a given size. These functions are implemented with a pthread_mutex_t,
+ * and using them also requires explicitly linking against the compiler runtime library, libatomic.so.
+ * On architectures that do not support atomics, such as armv6, the builtins may defer to kernel helpers,
+ * or on classic uniprocessor systems simply disable interrupts.
+ *
+ * We do not want to have to link against libatomic.so or fall into the trap of our atomics degrading
+ * into locks. We would rather have user-code explicitly use locking primitives if their code cannot
+ * be satisfied with atomic instructions on the given platform.
+ */
+static_assert(__atomic_always_lock_free(1, 0), "eastl::atomic<T> where sizeof(T) == 1 must be lock-free!");
+static_assert(__atomic_always_lock_free(2, 0), "eastl::atomic<T> where sizeof(T) == 2 must be lock-free!");
+static_assert(__atomic_always_lock_free(4, 0), "eastl::atomic<T> where sizeof(T) == 4 must be lock-free!");
+#if EA_PLATFORM_PTR_SIZE == 8
+ static_assert(__atomic_always_lock_free(8, 0), "eastl::atomic<T> where sizeof(T) == 8 must be lock-free!");
+#endif
+
+/**
+ * NOTE:
+ *
+ * The following can fail on gcc/clang on 64-bit systems.
+ * First, on clang it depends on the -march setting whether or not it calls out to libatomic for 128-bit operations.
+ * Second, gcc always calls out to libatomic for 128-bit atomics. It is unclear whether it uses locks
+ * or inspects the cpuid and uses cmpxchg16b when it is available.
+ * The gcc mailing lists argue that since a 128-bit load must be implemented with cmpxchg16b, the __atomic builtin
+ * cannot be used on read-only memory, which is why they always call out to libatomic.
+ * Unfortunately, there is no way to tell gcc not to do that.
+ * We do not care about the read-only restriction because our eastl::atomic<T> object is mutable,
+ * and msvc does not enforce this restriction either; thus, to be fully platform agnostic, we cannot enforce it.
+ *
+ * Therefore, the following static_assert is commented out for the time being, as it always fails on these compilers.
+ * We still guarantee 128-bit atomics are lock-free by handrolling the inline assembly ourselves.
+ *
+ * static_assert(__atomic_always_lock_free(16, 0), "eastl::atomic<T> where sizeof(T) == 16 must be lock-free!");
+ */
+
+/**
+ * NOTE:
+ *
+ * Why do we cast to the unsigned fixed width types for every operation even though the gcc/clang builtins are generic?
+ * Because gcc/clang, correctly from their point of view but needlessly for ours, call out to libatomic and do locking
+ * on user types that may be potentially misaligned.
+ * For example, struct UserType { uint8_t a, b; }; is 2 bytes in size but has only 1 byte alignment.
+ * gcc/clang cannot know that we always guarantee every type T is size aligned within eastl::atomic<T>,
+ * so they emit calls into libatomic and do locking for structs like these, which we do not want.
+ * This is why we always cast each atomic pointer type to the equivalent unsigned fixed width type when performing the atomic operations,
+ * ensuring all user types are treated as size aligned and are thus lock free.
+ */
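+
+/**
+ * Illustrative sketch (not part of the implementation; 'obj' and 'desired' are hypothetical locals assumed
+ * to be size aligned, as eastl::atomic<T> guarantees): the difference between handing the raw user type to
+ * the builtin and handing it the equivalent fixed width integral, as the macros below do, is roughly:
+ *
+ *     struct UserType { uint8_t a, b; };   // sizeof == 2, alignof == 1
+ *
+ *     UserType obj{};
+ *     UserType desired{};
+ *
+ *     // Raw user type: gcc/clang cannot prove alignment and may emit a libatomic call that locks.
+ *     __atomic_store(&obj, &desired, __ATOMIC_SEQ_CST);
+ *
+ *     // Through the equivalent fixed width integral type, the compiler can emit a plain lock-free 16-bit store.
+ *     __atomic_store_n(reinterpret_cast<uint16_t*>(&obj),
+ *                      *reinterpret_cast<uint16_t*>(&desired), __ATOMIC_SEQ_CST);
+ *
+ * The macros below perform these conversions via EASTL_ATOMIC_TYPE_PUN_CAST and EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST
+ * rather than raw reinterpret_casts as in this sketch.
+ */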
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_COMPILER_ATOMIC_HAS_8BIT
+#define EASTL_COMPILER_ATOMIC_HAS_16BIT
+#define EASTL_COMPILER_ATOMIC_HAS_32BIT
+#define EASTL_COMPILER_ATOMIC_HAS_64BIT
+
+#if EA_PLATFORM_PTR_SIZE == 8
+ #define EASTL_COMPILER_ATOMIC_HAS_128BIT
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8 uint8_t
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16 uint16_t
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32 uint32_t
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64 uint64_t
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128 __uint128_t
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, gccMemoryOrder) \
+ { \
+ integralType retIntegral; \
+ integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \
+ \
+ retIntegral = fetchIntrinsic(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), valIntegral, gccMemoryOrder); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_INTRIN_N(integralType, type, ret, ptr, expected, desired, weak, successOrder, failOrder) \
+ ret = __atomic_compare_exchange(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
+ EASTL_ATOMIC_INTEGRAL_CAST(integralType, (expected)), \
+ EASTL_ATOMIC_INTEGRAL_CAST(integralType, &(desired)), \
+ weak, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_INTRIN_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ { \
+ integralType retIntegral; \
+ integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \
+ \
+ __atomic_exchange(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
+ &valIntegral, &retIntegral, gccMemoryOrder); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
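+
+/**
+ * Illustrative sketch (an assumption about how the wrappers compose, not an exact preprocessor expansion):
+ * a 32-bit relaxed add_fetch such as EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32(T, ret, ptr, val)
+ * ultimately evaluates to a block of this shape:
+ *
+ *     {
+ *         uint32_t retIntegral;
+ *         uint32_t valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(uint32_t, (val));
+ *
+ *         retIntegral = __atomic_add_fetch(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(uint32_t, (ptr)),
+ *                                          valIntegral, __ATOMIC_RELAXED);
+ *
+ *         ret = EASTL_ATOMIC_TYPE_PUN_CAST(T, retIntegral);
+ *     }
+ */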
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "compiler_gcc_fetch_add.h"
+#include "compiler_gcc_fetch_sub.h"
+
+#include "compiler_gcc_fetch_and.h"
+#include "compiler_gcc_fetch_xor.h"
+#include "compiler_gcc_fetch_or.h"
+
+#include "compiler_gcc_add_fetch.h"
+#include "compiler_gcc_sub_fetch.h"
+
+#include "compiler_gcc_and_fetch.h"
+#include "compiler_gcc_xor_fetch.h"
+#include "compiler_gcc_or_fetch.h"
+
+#include "compiler_gcc_exchange.h"
+
+#include "compiler_gcc_cmpxchg_weak.h"
+#include "compiler_gcc_cmpxchg_strong.h"
+
+#include "compiler_gcc_load.h"
+#include "compiler_gcc_store.h"
+
+#include "compiler_gcc_barrier.h"
+
+#include "compiler_gcc_cpu_pause.h"
+
+#include "compiler_gcc_signal_fence.h"
+
+#include "compiler_gcc_thread_fence.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h
new file mode 100644
index 0000000..1d19196
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_add_fetch.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_ADD_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_ADD_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_ADD_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_add_fetch, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_ADD_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_ADD_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h
new file mode 100644
index 0000000..a35307f
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_and_fetch.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_AND_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_AND_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_AND_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_and_fetch, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_AND_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_AND_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_AND_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_AND_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_AND_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_AND_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_AND_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h
new file mode 100644
index 0000000..64e8e54
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_barrier.h
@@ -0,0 +1,30 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER()
+//
+#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() \
+ __asm__ __volatile__ ("" ::: "memory")
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type)
+//
+#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, type) \
+ __asm__ __volatile__ ("" : /* Output Operands */ : "r"(&(val)) : "memory")
+
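+/**
+ * Illustrative usage sketch (an assumption about intended use; 'ExpensiveComputation' is a hypothetical function):
+ * the data dependency variant passes the address of 'val' into empty inline assembly with a memory clobber,
+ * so the compiler must materialize the value and may not reorder or elide it across the barrier:
+ *
+ *     int computed = ExpensiveComputation();
+ *     EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(computed, int);
+ *     // The compiler cannot defer the computation of 'computed' past this point.
+ */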
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_BARRIER_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h
new file mode 100644
index 0000000..3e47cf2
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_strong.h
@@ -0,0 +1,182 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_STRONG_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_STRONG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(integralType, type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_INTRIN_N(integralType, type, ret, ptr, expected, desired, false, successOrder, failOrder)
+
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint8_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint16_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint32_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(uint64_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_N(__uint128_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
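+// Illustrative usage sketch (an assumption about intended use; 'ptr' is a hypothetical pointer to a size-aligned
+// uint32_t): a compare-exchange loop that increments the pointed-to value, where 'expected' is refreshed with the
+// observed value whenever the exchange fails:
+//
+//     uint32_t expected = 0;
+//     bool exchanged = false;
+//     while (!exchanged)
+//     {
+//         uint32_t desired = expected + 1;
+//         EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(uint32_t, exchanged, ptr, &expected, desired);
+//     }
+//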
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_STRONG_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h
new file mode 100644
index 0000000..f55fe3a
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cmpxchg_weak.h
@@ -0,0 +1,182 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_WEAK_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_WEAK_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(integralType, type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_INTRIN_N(integralType, type, ret, ptr, expected, desired, true, successOrder, failOrder)
+
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint8_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint16_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint32_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(uint64_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+#define EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, successOrder, failOrder) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_N(__uint128_t, type, ret, ptr, expected, desired, successOrder, failOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_RELEASE, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_8(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_16(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_32(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_64(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_GCC_ATOMIC_CMPXCHG_WEAK_128(type, ret, ptr, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CMPXCHG_WEAK_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h
new file mode 100644
index 0000000..9d4ac35
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_cpu_pause.h
@@ -0,0 +1,31 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CPU_PAUSE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CPU_PAUSE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_PAUSE()
+//
+#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \
+ __asm__ __volatile__ ("pause")
+
+#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64)
+
+ #define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \
+ __asm__ __volatile__ ("yield")
+
+#endif
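+
+/**
+ * Illustrative usage sketch (an assumption about typical use; 'lockIsHeld' is a hypothetical flag being polled):
+ * the pause/yield hint is intended for the body of a spin-wait loop, reducing pipeline pressure and power
+ * while waiting:
+ *
+ *     while (lockIsHeld)
+ *     {
+ *         EASTL_COMPILER_ATOMIC_CPU_PAUSE();
+ *     }
+ */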
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_CPU_PAUSE_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h
new file mode 100644
index 0000000..a332554
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_exchange.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_EXCHANGE_INTRIN_N(integralType, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_EXCHANGE_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_EXCHANGE_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_EXCHANGE_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_EXCHANGE_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_EXCHANGE_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_EXCHANGE_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h
new file mode 100644
index 0000000..98abbb8
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_add.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_ADD_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_ADD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_FETCH_ADD_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_add, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val)
+//
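+// Note that __atomic_fetch_add returns the value the object held *before* the
+// addition. Sketch, with a hypothetical relaxed uint32_t counter:
+//
+//     uint32_t prev;
+//     EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32(uint32_t, prev, &counter, 1);
+//     // prev holds the old count; the object now holds prev + 1.
+//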
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_ADD_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_ADD_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h
new file mode 100644
index 0000000..0dfb81d
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_and.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_AND_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_AND_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_FETCH_AND_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_and, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_AND_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_AND_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_AND_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_AND_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_AND_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_AND_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_AND_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h
new file mode 100644
index 0000000..ba259b7
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_or.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_OR_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_OR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_FETCH_OR_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_or, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_OR_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_OR_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_OR_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_OR_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_OR_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_OR_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_OR_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h
new file mode 100644
index 0000000..c8be225
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_sub.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_SUB_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_SUB_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_FETCH_SUB_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_sub, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_SUB_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_SUB_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h
new file mode 100644
index 0000000..4ec6d67
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_fetch_xor.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_XOR_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_XOR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_FETCH_XOR_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_fetch_xor, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_FETCH_XOR_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_FETCH_XOR_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h
new file mode 100644
index 0000000..a4a3ebf
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_load.h
@@ -0,0 +1,90 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_LOAD_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_LOAD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_LOAD_N(integralType, type, ret, ptr, gccMemoryOrder) \
+ { \
+ integralType retIntegral; \
+ __atomic_load(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), &retIntegral, gccMemoryOrder); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
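+
+// For example, EASTL_GCC_ATOMIC_LOAD_32(T, ret, ptr, __ATOMIC_ACQUIRE) expands to a block
+// that loads the bits of *ptr as a uint32_t via __atomic_load with acquire ordering and
+// then type-puns them back into T before assigning the result to ret.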
+
+#define EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_LOAD_N(uint8_t, type, ret, ptr, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_LOAD_N(uint16_t, type, ret, ptr, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_LOAD_N(uint32_t, type, ret, ptr, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_LOAD_N(uint64_t, type, ret, ptr, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_LOAD_N(__uint128_t, type, ret, ptr, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_LOAD_*_N(type, type ret, type * ptr)
+//
+#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_8(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_16(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_32(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_64(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_RELAXED_128(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_8(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_16(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_32(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_64(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_ACQUIRE_128(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_8(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_8(type, ret, ptr, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_16(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_16(type, ret, ptr, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_32(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_32(type, ret, ptr, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_64(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_64(type, ret, ptr, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_LOAD_SEQ_CST_128(type, ret, ptr) \
+ EASTL_GCC_ATOMIC_LOAD_128(type, ret, ptr, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_LOAD_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h
new file mode 100644
index 0000000..9e4db3e
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_or_fetch.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_OR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_OR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_OR_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_or_fetch, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_OR_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_OR_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_OR_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_OR_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_OR_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
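+// Note: unlike the FETCH_OR wrappers in compiler_gcc_fetch_or.h, these defer to
+// __atomic_or_fetch, so ret receives the value of the object *after* the OR is applied.
+//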
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_OR_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_OR_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h
new file mode 100644
index 0000000..16dff14
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_signal_fence.h
@@ -0,0 +1,38 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SIGNAL_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SIGNAL_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_SIGNAL_FENCE(gccMemoryOrder) \
+ __atomic_signal_fence(gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_*()
+//
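+// Note: __atomic_signal_fence only constrains compiler reordering (relative to a signal
+// handler running on the same thread); it emits no hardware fence instruction. For
+// inter-thread ordering see compiler_gcc_thread_fence.h.
+//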
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED() \
+ EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE() \
+ EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE() \
+ EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL() \
+ EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST() \
+ EASTL_GCC_ATOMIC_SIGNAL_FENCE(__ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SIGNAL_FENCE_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h
new file mode 100644
index 0000000..04a28ac
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_store.h
@@ -0,0 +1,89 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_STORE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_STORE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_STORE_N(integralType, ptr, val, gccMemoryOrder) \
+ { \
+ integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)); \
+ __atomic_store(EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), &valIntegral, gccMemoryOrder); \
+ }
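+
+// For example, EASTL_GCC_ATOMIC_STORE_32(ptr, val, __ATOMIC_RELEASE) type-puns val into a
+// local uint32_t and stores it through an integral view of ptr via __atomic_store with
+// release ordering.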
+
+
+#define EASTL_GCC_ATOMIC_STORE_8(ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_STORE_N(uint8_t, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_STORE_16(ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_STORE_N(uint16_t, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_STORE_32(ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_STORE_N(uint32_t, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_STORE_64(ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_STORE_N(uint64_t, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_STORE_128(ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_STORE_N(__uint128_t, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_STORE_*_N(type, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_8(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_8(ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_16(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_16(ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_32(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_32(ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_64(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_64(ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELAXED_128(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_128(ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_8(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_8(ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_16(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_16(ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_32(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_32(ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_64(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_64(ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_STORE_RELEASE_128(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_128(ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_8(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_8(ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_16(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_16(ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_32(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_32(ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_64(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_64(ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_STORE_SEQ_CST_128(type, ptr, val) \
+ EASTL_GCC_ATOMIC_STORE_128(ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_STORE_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h
new file mode 100644
index 0000000..62f8cd9
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_sub_fetch.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SUB_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SUB_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_SUB_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_sub_fetch, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_SUB_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_SUB_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h
new file mode 100644
index 0000000..0dd005e
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_thread_fence.h
@@ -0,0 +1,38 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_THREAD_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_THREAD_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_THREAD_FENCE(gccMemoryOrder) \
+ __atomic_thread_fence(gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_THREAD_FENCE_*()
+//
+#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELAXED() \
+ EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQUIRE() \
+ EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_RELEASE() \
+ EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_ACQ_REL() \
+ EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_THREAD_FENCE_SEQ_CST() \
+ EASTL_GCC_ATOMIC_THREAD_FENCE(__ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_THREAD_FENCE_H */
diff --git a/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h
new file mode 100644
index 0000000..4827d79
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/gcc/compiler_gcc_xor_fetch.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_GCC_XOR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_GCC_XOR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_GCC_ATOMIC_XOR_FETCH_N(integralType, type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_FETCH_INTRIN_N(integralType, __atomic_xor_fetch, type, ret, ptr, val, gccMemoryOrder)
+
+
+#define EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_N(uint8_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_N(uint16_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_N(uint32_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_N(uint64_t, type, ret, ptr, val, gccMemoryOrder)
+
+#define EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, gccMemoryOrder) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_N(__uint128_t, type, ret, ptr, val, gccMemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_GCC_ATOMIC_XOR_FETCH_128(type, ret, ptr, val, __ATOMIC_SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_GCC_XOR_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h
new file mode 100644
index 0000000..6df8c05
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc.h
@@ -0,0 +1,260 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+EA_DISABLE_ALL_VC_WARNINGS();
+#include <Windows.h>
+#include <intrin.h>
+EA_RESTORE_ALL_VC_WARNINGS();
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_COMPILER_ATOMIC_HAS_8BIT
+#define EASTL_COMPILER_ATOMIC_HAS_16BIT
+#define EASTL_COMPILER_ATOMIC_HAS_32BIT
+#define EASTL_COMPILER_ATOMIC_HAS_64BIT
+
+#if EA_PLATFORM_PTR_SIZE == 8
+ #define EASTL_COMPILER_ATOMIC_HAS_128BIT
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8 char
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16 short
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32 long
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64 __int64
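+
+// NOTE: 'long' rather than 'int' is used for the 32-bit width above because the
+// 32-bit _Interlocked* intrinsics take long parameters, e.g.
+// long _InterlockedExchange(long volatile*, long).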
+
+namespace eastl
+{
+
+namespace internal
+{
+
+struct FixedWidth128
+{
+ __int64 value[2];
+};
+
+} // namespace internal
+
+} // namespace eastl
+
+#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128 eastl::internal::FixedWidth128
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+/**
+ * NOTE:
+ *
+ * The MSVC interlocked intrinsics differ by target architecture; in particular the
+ * ARM variants carry memory-order suffixes such as _acq, _rel and _nf, while the
+ * x86/x86-64 variants do not.
+ * The indirection macros below centralize that difference so the per-operation
+ * headers need no architecture-specific copy-paste, which reduces the chance of
+ * programmer error.
+ *
+ * All compiler implementations end up deferring to the macros below
+ * (see the example expansion after this block).
+ */
+#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)
+
+
+ #define EASTL_MSVC_ATOMIC_FETCH_OP(ret, ptr, val, MemoryOrder, Intrinsic) \
+ ret = Intrinsic(ptr, val)
+
+ #define EASTL_MSVC_ATOMIC_EXCHANGE_OP(ret, ptr, val, MemoryOrder, Intrinsic) \
+ ret = Intrinsic(ptr, val)
+
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(ret, ptr, comparand, exchange, MemoryOrder, Intrinsic) \
+ ret = Intrinsic(ptr, exchange, comparand)
+
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(ret, ptr, comparandResult, exchangeHigh, exchangeLow, MemoryOrder) \
+ ret = _InterlockedCompareExchange128_np(ptr, exchangeHigh, exchangeLow, comparandResult)
+
+
+#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64)
+
+
+ #define EASTL_MSVC_INTRINSIC_RELAXED(Intrinsic) \
+ EA_PREPROCESSOR_JOIN(Intrinsic, _nf)
+
+ #define EASTL_MSVC_INTRINSIC_ACQUIRE(Intrinsic) \
+ EA_PREPROCESSOR_JOIN(Intrinsic, _acq)
+
+ #define EASTL_MSVC_INTRINSIC_RELEASE(Intrinsic) \
+ EA_PREPROCESSOR_JOIN(Intrinsic, _rel)
+
+ #define EASTL_MSVC_INTRINSIC_ACQ_REL(Intrinsic) \
+ Intrinsic
+
+ #define EASTL_MSVC_INTRINSIC_SEQ_CST(Intrinsic) \
+ Intrinsic
+
+
+ #define EASTL_MSVC_ATOMIC_FETCH_OP(ret, ptr, val, MemoryOrder, Intrinsic) \
+ ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, val)
+
+ #define EASTL_MSVC_ATOMIC_EXCHANGE_OP(ret, ptr, val, MemoryOrder, Intrinsic) \
+ ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, val)
+
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(ret, ptr, comparand, exchange, MemoryOrder, Intrinsic) \
+ ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, exchange, comparand)
+
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(ret, ptr, comparandResult, exchangeHigh, exchangeLow, MemoryOrder) \
+ ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(_InterlockedCompareExchange128)(ptr, exchangeHigh, exchangeLow, comparandResult)
+
+
+#endif
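+
+
+/**
+ * Example expansion (illustrative only, not used directly by any header):
+ *
+ *   EASTL_MSVC_ATOMIC_FETCH_OP(ret, ptr, val, ACQUIRE, _InterlockedExchangeAdd)
+ *
+ * becomes, on ARM,
+ *
+ *   ret = _InterlockedExchangeAdd_acq(ptr, val);
+ *
+ * because EASTL_MSVC_INTRINSIC_ACQUIRE() appends the _acq suffix to the intrinsic name.
+ * On x86/x86-64 the MemoryOrder argument is ignored entirely, since the lock-prefixed
+ * _Interlocked* intrinsics are already full barriers.
+ */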
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_MSVC_NOP_POST_INTRIN_COMPUTE(ret, lhs, rhs)
+
+#define EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE(ret, val) \
+ ret = (val)
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE) \
+ { \
+ integralType retIntegral; \
+ type valCompute; \
+ \
+ PRE_INTRIN_COMPUTE(valCompute, (val)); \
+ const integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, valCompute); \
+ \
+ EASTL_MSVC_ATOMIC_FETCH_OP(retIntegral, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
+ valIntegral, MemoryOrder, fetchIntrinsic); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ POST_INTRIN_COMPUTE(ret, ret, (val)); \
+ }
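+
+/**
+ * NOTE: the MSVC _Interlocked* intrinsics return the value held *before* the operation.
+ * The plain fetch_* operations therefore pass EASTL_MSVC_NOP_POST_INTRIN_COMPUTE, while
+ * the *_fetch operations pass a POST_INTRIN_COMPUTE hook that re-applies the operation
+ * to the returned value (for example, ret = old + val for add_fetch).
+ * PRE_INTRIN_COMPUTE exists so an operation can transform the operand before the
+ * intrinsic is invoked.
+ */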
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_INTRIN_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ { \
+ integralType retIntegral; \
+ EASTL_MSVC_ATOMIC_EXCHANGE_OP(retIntegral, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
+ EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)), MemoryOrder, \
+ exchangeIntrinsic); \
+ \
+ ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
+ }
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder) \
+ { \
+ integralType comparandIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, *(expected)); \
+ integralType oldIntegral; \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(oldIntegral, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
+ comparandIntegral, EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (desired)), \
+ MemoryOrder, cmpxchgStrongIntrinsic); \
+ \
+ if (oldIntegral == comparandIntegral) \
+ { \
+ ret = true; \
+ } \
+ else \
+ { \
+ *(expected) = EASTL_ATOMIC_TYPE_PUN_CAST(type, oldIntegral); \
+ ret = false; \
+ } \
+ }
+
+/**
+ * The wording in the Microsoft docs can be a little confusing.
+ * ExchangeHigh means the top 8 bytes, i.e. (ptr + 8).
+ * ExchangeLow means the low 8 bytes, i.e. (ptr).
+ * Endianness does not matter here since we are only loading and comparing data;
+ * think of it as memcpy() and memcmp() calls, for which the layout of the data
+ * itself is irrelevant.
+ * Only after we type pun back to the original type and load from memory does
+ * the layout of the data matter again.
+ */
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_128(type, ret, ptr, expected, desired, MemoryOrder) \
+ { \
+ union TypePun \
+ { \
+ type templateType; \
+ \
+ struct exchange128 \
+ { \
+ __int64 value[2]; \
+ }; \
+ \
+ struct exchange128 exchangePun; \
+ }; \
+ \
+ union TypePun typePun = { (desired) }; \
+ \
+ unsigned char cmpxchgRetChar; \
+		EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(cmpxchgRetChar, EASTL_ATOMIC_VOLATILE_TYPE_CAST(__int64, (ptr)), \
+ EASTL_ATOMIC_TYPE_CAST(__int64, (expected)), \
+ typePun.exchangePun.value[1], typePun.exchangePun.value[0], \
+ MemoryOrder); \
+ \
+ ret = static_cast<bool>(cmpxchgRetChar); \
+ }
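+
+/**
+ * A minimal sketch of what the expansion above performs for a hypothetical 16-byte
+ * type T (illustrative only; CmpxchgStrong128 is not a real function):
+ *
+ *   bool CmpxchgStrong128(T* ptr, T* expected, T desired)
+ *   {
+ *       __int64 exchange[2];
+ *       memcpy(exchange, &desired, 16);            // type pun the desired value
+ *       unsigned char ok = _InterlockedCompareExchange128(
+ *           reinterpret_cast<__int64 volatile*>(ptr),
+ *           exchange[1], exchange[0],              // high 8 bytes, low 8 bytes
+ *           reinterpret_cast<__int64*>(expected)); // updated with the value previously held at ptr
+ *       return ok != 0;
+ *   }
+ */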
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE) \
+ EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, EASTL_MSVC_NOP_POST_INTRIN_COMPUTE)
+
+#define EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE) \
+ EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_INTRIN_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_128(type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_128(type, ret, ptr, expected, desired, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#include "compiler_msvc_fetch_add.h"
+#include "compiler_msvc_fetch_sub.h"
+
+#include "compiler_msvc_fetch_and.h"
+#include "compiler_msvc_fetch_xor.h"
+#include "compiler_msvc_fetch_or.h"
+
+#include "compiler_msvc_add_fetch.h"
+#include "compiler_msvc_sub_fetch.h"
+
+#include "compiler_msvc_and_fetch.h"
+#include "compiler_msvc_xor_fetch.h"
+#include "compiler_msvc_or_fetch.h"
+
+#include "compiler_msvc_exchange.h"
+
+#include "compiler_msvc_cmpxchg_weak.h"
+#include "compiler_msvc_cmpxchg_strong.h"
+
+#include "compiler_msvc_barrier.h"
+
+#include "compiler_msvc_cpu_pause.h"
+
+#include "compiler_msvc_signal_fence.h"
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h
new file mode 100644
index 0000000..12fc4b0
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_add_fetch.h
@@ -0,0 +1,104 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_ADD_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_ADD_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_MSVC_ADD_FETCH_POST_INTRIN_COMPUTE(ret, val, addend) \
+ ret = (val) + (addend)
+
+#define EASTL_MSVC_ATOMIC_ADD_FETCH_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_ADD_FETCH_POST_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_ADD_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_ADD_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_ADD_FETCH_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_ADD_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h
new file mode 100644
index 0000000..70ec577
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_and_fetch.h
@@ -0,0 +1,121 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_AND_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_AND_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_8 _InterlockedAnd8_np
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_16 _InterlockedAnd16_np
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_32 _InterlockedAnd_np
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_64 _InterlockedAnd64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_8 _InterlockedAnd8
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_16 _InterlockedAnd16
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_32 _InterlockedAnd
+ #define EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_64 _InterlockedAnd64
+
+#endif
+
+
+#define EASTL_MSVC_AND_FETCH_POST_INTRIN_COMPUTE(ret, val, andend) \
+ ret = (val) & (andend)
+
+#define EASTL_MSVC_ATOMIC_AND_FETCH_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_AND_FETCH_POST_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_N(char, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_N(short, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_N(long, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_32, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_N(__int64, EASTL_MSVC_ATOMIC_AND_FETCH_INTRIN_64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_AND_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_AND_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_AND_FETCH_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_AND_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h
new file mode 100644
index 0000000..02e2d03
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_barrier.h
@@ -0,0 +1,31 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_BARRIER_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_BARRIER_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER()
+//
+#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER() \
+ _ReadWriteBarrier()
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(const T&, type)
+//
+#define EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY(val, type) \
+ EASTL_COMPILER_ATOMIC_COMPILER_BARRIER_DATA_DEPENDENCY_FUNC(const_cast<type*>(eastl::addressof((val)))); \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_BARRIER_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h
new file mode 100644
index 0000000..42117a1
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_strong.h
@@ -0,0 +1,195 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_STRONG_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_STRONG_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_8 _InterlockedCompareExchange8
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_16 _InterlockedCompareExchange16_np
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_32 _InterlockedCompareExchange_np
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_64 _InterlockedCompareExchange64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_8 _InterlockedCompareExchange8
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_16 _InterlockedCompareExchange16
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_32 _InterlockedCompareExchange
+ #define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_64 _InterlockedCompareExchange64
+
+#endif
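+
+/* NOTE: the 8-bit compare-exchange intrinsic appears to have no _np variant, which is
+ * presumably why plain _InterlockedCompareExchange8 is used in both branches above.
+ */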
+
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(char, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_8, type, ret, ptr, expected, desired, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(short, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_16, type, ret, ptr, expected, desired, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(long, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_32, type, ret, ptr, expected, desired, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(__int64, EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_64, type, ret, ptr, expected, desired, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_128(type, ret, ptr, expected, desired, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
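+// The two memory-order tokens name the success and failure orderings of the
+// compare-exchange; the mappings below use the (stronger) success ordering for both,
+// since the underlying intrinsic takes at most one ordering.
+//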
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, SEQ_CST)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, SEQ_CST)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_8(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_16(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_32(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_64(type, ret, ptr, expected, desired, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128(type, ret, ptr, expected, desired, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_STRONG_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h
new file mode 100644
index 0000000..8f4147a
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cmpxchg_weak.h
@@ -0,0 +1,162 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_WEAK_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_WEAK_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_*_*_N(type, bool ret, type * ptr, type * expected, type desired)
+//
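+// MSVC has no compare-exchange that can fail spuriously, so the weak variants below
+// simply alias their strong counterparts.
+//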
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELAXED_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELAXED_RELAXED_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_RELAXED_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQUIRE_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_RELEASE_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_RELEASE_RELAXED_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_RELAXED_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_ACQ_REL_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_RELAXED_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_ACQUIRE_128(type, ret, ptr, expected, desired)
+
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_8(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_16(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_32(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_64(type, ret, ptr, expected, desired)
+
+#define EASTL_COMPILER_ATOMIC_CMPXCHG_WEAK_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired) \
+ EASTL_COMPILER_ATOMIC_CMPXCHG_STRONG_SEQ_CST_SEQ_CST_128(type, ret, ptr, expected, desired)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CMPXCHG_WEAK_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h
new file mode 100644
index 0000000..720701a
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_cpu_pause.h
@@ -0,0 +1,27 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CPU_PAUSE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CPU_PAUSE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_CPU_PAUSE()
+//
+// NOTE:
+// YieldProcessor() is a rather obscure macro in Windows.h that expands to the
+// pause (rep; nop) instruction on compatible x86 CPUs, or to yield on compatible
+// ARM processors.
+// This is nicer than switching on platform-specific intrinsics ourselves.
+//
+#define EASTL_COMPILER_ATOMIC_CPU_PAUSE() \
+ YieldProcessor()
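+
+// Typical usage is inside a spin-wait loop, e.g. (illustrative sketch only):
+//
+//   while (!ready)                          // some flag polled while spinning
+//       EASTL_COMPILER_ATOMIC_CPU_PAUSE();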
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_CPU_PAUSE_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h
new file mode 100644
index 0000000..323f1fa
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_exchange.h
@@ -0,0 +1,125 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_EXCHANGE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_EXCHANGE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(char, _InterlockedExchange8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(short, _InterlockedExchange16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(long, _InterlockedExchange, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(__int64, _InterlockedExchange64, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, MemoryOrder) \
+ { \
+ bool cmpxchgRet; \
+		/* This is intentionally a non-atomic 128-bit load which may observe tearing. */ \
+		/* Either the torn value does not match *(ptr), in which case the cmpxchg fails and */ \
+		/* writes the atomically observed value into ret before we retry, or the non-atomic */ \
+		/* load got lucky, the observed value equals *(ptr), and the cmpxchg succeeds. */ \
+ ret = *(ptr); \
+ do \
+ { \
+ EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), val); \
+ } while (!cmpxchgRet); \
+ }
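+
+/**
+ * Expressed as a plain function for a hypothetical 16-byte type T, the loop above is
+ * roughly the following (illustrative only; CmpxchgStrong128 stands in for the
+ * EASTL_ATOMIC_CMPXCHG_STRONG_*_128 macro):
+ *
+ *   T Exchange128(T* ptr, T desired)
+ *   {
+ *       T observed = *ptr; // optimistic, possibly torn read
+ *       while (!CmpxchgStrong128(ptr, &observed, desired))
+ *       {
+ *           // on failure, observed holds the atomically loaded current value; retry
+ *       }
+ *       return observed;   // the value that was replaced
+ *   }
+ */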
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_64(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_EXCHANGE_128(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_EXCHANGE_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h
new file mode 100644
index 0000000..a951740
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_add.h
@@ -0,0 +1,101 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_ADD_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_ADD_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, addIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_ADD_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_ADD_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_ADD_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_ADD_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h
new file mode 100644
index 0000000..96f7894
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_and.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_AND_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_AND_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_8 _InterlockedAnd8_np
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_16 _InterlockedAnd16_np
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_32 _InterlockedAnd_np
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_64 _InterlockedAnd64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_8 _InterlockedAnd8
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_16 _InterlockedAnd16
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_32 _InterlockedAnd
+ #define EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_64 _InterlockedAnd64
+
+#endif
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_AND_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, andIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_N(char, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_N(short, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_N(long, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_32, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_N(__int64, EASTL_MSVC_ATOMIC_FETCH_AND_INTRIN_64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_AND_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_AND_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_AND_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_AND_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h
new file mode 100644
index 0000000..2792fc3
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_or.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_OR_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_OR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_8 _InterlockedOr8_np
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_16 _InterlockedOr16_np
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_32 _InterlockedOr_np
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_64 _InterlockedOr64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_8 _InterlockedOr8
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_16 _InterlockedOr16
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_32 _InterlockedOr
+ #define EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_64 _InterlockedOr64
+
+#endif
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_OR_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_N(char, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_N(short, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_N(long, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_32, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_N(long long, EASTL_MSVC_ATOMIC_FETCH_OR_INTRIN_64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_OR_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_OR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_OR_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_OR_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h
new file mode 100644
index 0000000..6d5d9e3
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_sub.h
@@ -0,0 +1,104 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_SUB_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_SUB_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_MSVC_FETCH_SUB_PRE_INTRIN_COMPUTE(ret, val) \
+ ret = EASTL_ATOMIC_NEGATE_OPERAND((val))
+
+#define EASTL_MSVC_ATOMIC_FETCH_SUB_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_FETCH_SUB_PRE_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder)
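+
+// Illustrative note: the subtraction is implemented by negating the operand up front
+// (EASTL_MSVC_FETCH_SUB_PRE_INTRIN_COMPUTE) and delegating to the _InterlockedExchangeAdd*
+// family; the intrinsic returns the pre-operation value, which is exactly what a
+// fetch_sub yields. A hedged sketch (placeholder names):
+//
+//     int32_t previous;
+//     EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32(int32_t, previous, ptr, 5);
+//     // *ptr was atomically decremented by 5; previous holds the old value.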
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_SUB_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_SUB_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_SUB_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_SUB_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h
new file mode 100644
index 0000000..371153e
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_fetch_xor.h
@@ -0,0 +1,118 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_XOR_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_XOR_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_8 _InterlockedXor8_np
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_16 _InterlockedXor16_np
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_32 _InterlockedXor_np
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_64 _InterlockedXor64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_8 _InterlockedXor8
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_16 _InterlockedXor16
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_32 _InterlockedXor
+ #define EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_64 _InterlockedXor64
+
+#endif
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_XOR_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE)
+
+
+#define EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_N(char, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_N(short, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_N(long, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_32, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_N(__int64, EASTL_MSVC_ATOMIC_FETCH_XOR_INTRIN_64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_FETCH_XOR_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_FETCH_XOR_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_FETCH_XOR_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_FETCH_XOR_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h
new file mode 100644
index 0000000..c5b5fac
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_or_fetch.h
@@ -0,0 +1,121 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_OR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_OR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_8 _InterlockedOr8_np
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_16 _InterlockedOr16_np
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_32 _InterlockedOr_np
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_64 _InterlockedOr64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_8 _InterlockedOr8
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_16 _InterlockedOr16
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_32 _InterlockedOr
+ #define EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_64 _InterlockedOr64
+
+#endif
+
+
+#define EASTL_MSVC_OR_FETCH_POST_INTRIN_COMPUTE(ret, val, orend) \
+ ret = (val) | (orend)
+
+#define EASTL_MSVC_ATOMIC_OR_FETCH_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, orIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_OR_FETCH_POST_INTRIN_COMPUTE)
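+
+// Illustrative note: _InterlockedOr* returns the value held before the OR, while an
+// or_fetch operation must yield the value after it, so the post-compute step above
+// re-applies the OR to the intrinsic's result. A hedged sketch (placeholder names):
+//
+//     uint32_t updated;
+//     EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32(uint32_t, updated, ptr, 0x4u);
+//     // updated == (old *ptr) | 0x4u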
+
+
+#define EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_N(char, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_N(short, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_N(long, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_32, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_N(__int64, EASTL_MSVC_ATOMIC_OR_FETCH_INTRIN_64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_OR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_OR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_OR_FETCH_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_OR_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h
new file mode 100644
index 0000000..f35f577
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_signal_fence.h
@@ -0,0 +1,34 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SIGNAL_FENCE_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SIGNAL_FENCE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_*()
+//
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELAXED() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQUIRE() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_ACQ_REL() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
+
+#define EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_SEQ_CST() \
+ EASTL_ATOMIC_COMPILER_BARRIER()
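+
+// Illustrative note: a signal fence only has to order operations against a signal
+// handler running on the same thread, so every ordering above reduces to a pure
+// compiler barrier and no fence instruction is emitted, e.g.
+//
+//     EASTL_COMPILER_ATOMIC_SIGNAL_FENCE_RELEASE(); // same as EASTL_ATOMIC_COMPILER_BARRIER()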
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SIGNAL_FENCE_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h
new file mode 100644
index 0000000..6fb61e2
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_sub_fetch.h
@@ -0,0 +1,107 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SUB_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SUB_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#define EASTL_MSVC_SUB_FETCH_PRE_INTRIN_COMPUTE(ret, val) \
+ ret = EASTL_ATOMIC_NEGATE_OPERAND((val))
+
+#define EASTL_MSVC_SUB_FETCH_POST_INTRIN_COMPUTE(ret, val, subend) \
+ ret = (val) - (subend)
+
+#define EASTL_MSVC_ATOMIC_SUB_FETCH_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, subIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_SUB_FETCH_PRE_INTRIN_COMPUTE, EASTL_MSVC_SUB_FETCH_POST_INTRIN_COMPUTE)
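+
+// Illustrative note: sub_fetch combines both adjustments. The operand is negated so the
+// _InterlockedExchangeAdd* intrinsic performs a subtraction, and since that intrinsic
+// returns the pre-operation value, the post-compute step derives the post-operation
+// result the caller expects. A hedged sketch (placeholder names):
+//
+//     int32_t updated;
+//     EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32(int32_t, updated, ptr, 3);
+//     // updated == (old *ptr) - 3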
+
+
+#define EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_N(char, _InterlockedExchangeAdd8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_N(short, _InterlockedExchangeAdd16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_N(__int64, _InterlockedExchangeAdd64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_SUB_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_SUB_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_SUB_FETCH_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_SUB_FETCH_H */
diff --git a/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h
new file mode 100644
index 0000000..44ffff9
--- /dev/null
+++ b/include/EASTL/internal/atomic/compiler/msvc/compiler_msvc_xor_fetch.h
@@ -0,0 +1,121 @@
+/////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_XOR_FETCH_H
+#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_XOR_FETCH_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+#if defined(EA_PROCESSOR_X86_64)
+
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_8 _InterlockedXor8_np
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_16 _InterlockedXor16_np
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_32 _InterlockedXor_np
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_64 _InterlockedXor64_np
+
+#else
+
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_8 _InterlockedXor8
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_16 _InterlockedXor16
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_32 _InterlockedXor
+ #define EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_64 _InterlockedXor64
+
+#endif
+
+
+#define EASTL_MSVC_XOR_FETCH_POST_INTRIN_COMPUTE(ret, val, xorend) \
+ ret = (val) ^ (xorend)
+
+#define EASTL_MSVC_ATOMIC_XOR_FETCH_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, xorIntrinsic, type, ret, ptr, val, MemoryOrder, \
+ EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE, EASTL_MSVC_XOR_FETCH_POST_INTRIN_COMPUTE)
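+
+// Illustrative note: same pattern as or_fetch. _InterlockedXor* returns the old value,
+// and the post-compute step above re-applies the XOR so the macro yields the new value,
+// e.g. (placeholder names): updated == (old *ptr) ^ mask.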
+
+
+#define EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_N(char, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_8, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_N(short, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_16, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_N(long, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_32, type, ret, ptr, val, MemoryOrder)
+
+#define EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, MemoryOrder) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_N(__int64, EASTL_MSVC_ATOMIC_XOR_FETCH_INTRIN_64, type, ret, ptr, val, MemoryOrder)
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// void EASTL_COMPILER_ATOMIC_XOR_FETCH_*_N(type, type ret, type * ptr, type val)
+//
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, RELAXED)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELAXED_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, RELAXED)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, ACQUIRE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQUIRE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, ACQUIRE)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, RELEASE)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_RELEASE_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, RELEASE)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, ACQ_REL)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_ACQ_REL_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, ACQ_REL)
+
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_8(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_8(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_16(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_16(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_32(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_32(type, ret, ptr, val, SEQ_CST)
+
+#define EASTL_COMPILER_ATOMIC_XOR_FETCH_SEQ_CST_64(type, ret, ptr, val) \
+ EASTL_MSVC_ATOMIC_XOR_FETCH_64(type, ret, ptr, val, SEQ_CST)
+
+
+#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_XOR_FETCH_H */
diff --git a/include/EASTL/internal/char_traits.h b/include/EASTL/internal/char_traits.h
new file mode 100644
index 0000000..62fe79b
--- /dev/null
+++ b/include/EASTL/internal/char_traits.h
@@ -0,0 +1,464 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements functionality similar to char_traits, which is part of
+// the C++ standard library specification. This is intended for internal
+// EASTL use only. Functionality can be accessed through the eastl::string or
+// eastl::string_view types.
+//
+// http://en.cppreference.com/w/cpp/string/char_traits
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_CHAR_TRAITS_H
+#define EASTL_CHAR_TRAITS_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <ctype.h> // toupper, etc.
+#include <string.h> // memset, etc.
+EA_RESTORE_ALL_VC_WARNINGS()
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////////////
+ /// DecodePart
+ ///
+ /// These implement UTF8/UCS2/UCS4 encoding/decoding.
+ ///
+ EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char*& pDest, char* pDestEnd);
+ EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+ EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+ EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char*& pDest, char* pDestEnd);
+ EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+ EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+ EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char*& pDest, char* pDestEnd);
+ EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+ EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+ EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char*& pDest, char* pDestEnd);
+ EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+ EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+ #if EA_CHAR8_UNIQUE
+ bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+
+ bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char*& pDest, char* pDestEnd);
+ bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+ bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+ bool DecodePart(const char*& pSrc, const char* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+ bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+ bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+ #endif
+
+ #if EA_WCHAR_UNIQUE
+ bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+
+ bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char*& pDest, char* pDestEnd);
+ bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd);
+ bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd);
+
+ bool DecodePart(const char*& pSrc, const char* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+ bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+ bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+ #endif
+
+ #if EA_CHAR8_UNIQUE && EA_WCHAR_UNIQUE
+ bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd);
+ bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd);
+ #endif
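+
+	// A hedged usage sketch (the buffer size and names below are placeholders): convert a
+	// UTF-8 range to UTF-16 in one pass. DecodePart advances both the source and the
+	// destination pointer as it converts; the boolean result is assumed to report success.
+	//
+	//     const char* pSrc    = pUtf8Text;
+	//     const char* pSrcEnd = pUtf8Text + utf8Length;
+	//     char16_t    buffer[256];
+	//     char16_t*   pDest   = buffer;
+	//     bool bOk = eastl::DecodePart(pSrc, pSrcEnd, pDest, buffer + 256);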
+
+
+ #if EA_WCHAR_UNIQUE
+ inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+ {
+ return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), reinterpret_cast<char*&>(pDest), reinterpret_cast<char*&>(pDestEnd));
+ }
+
+ inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char*& pDest, char* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(reinterpret_cast<const char16_t*&>(pSrc), reinterpret_cast<const char16_t*>(pSrcEnd), pDest, pDestEnd);
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(reinterpret_cast<const char32_t*&>(pSrc), reinterpret_cast<const char32_t*>(pSrcEnd), pDest, pDestEnd);
+ #endif
+ }
+
+ inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(reinterpret_cast<const char16_t*&>(pSrc), reinterpret_cast<const char16_t*>(pSrcEnd), pDest, pDestEnd);
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(reinterpret_cast<const char32_t*&>(pSrc), reinterpret_cast<const char32_t*>(pSrcEnd), pDest, pDestEnd);
+ #endif
+ }
+
+ inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(reinterpret_cast<const char16_t*&>(pSrc), reinterpret_cast<const char16_t*>(pSrcEnd), pDest, pDestEnd);
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(reinterpret_cast<const char32_t*&>(pSrc), reinterpret_cast<const char32_t*>(pSrcEnd), pDest, pDestEnd);
+ #endif
+ }
+
+ inline bool DecodePart(const char*& pSrc, const char* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char16_t*&>(pDest), reinterpret_cast<char16_t*>(pDestEnd));
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char32_t*&>(pDest), reinterpret_cast<char32_t*>(pDestEnd));
+ #endif
+ }
+
+ inline bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char16_t*&>(pDest), reinterpret_cast<char16_t*>(pDestEnd));
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char32_t*&>(pDest), reinterpret_cast<char32_t*>(pDestEnd));
+ #endif
+ }
+
+ inline bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char16_t*&>(pDest), reinterpret_cast<char16_t*>(pDestEnd));
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char32_t*&>(pDest), reinterpret_cast<char32_t*>(pDestEnd));
+ #endif
+ }
+ #endif
+
+ #if EA_CHAR8_UNIQUE
+ inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+ {
+ return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), reinterpret_cast<char*&>(pDest), reinterpret_cast<char*&>(pDestEnd));
+ }
+
+ inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char*& pDest, char* pDestEnd)
+ {
+ return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), pDest, pDestEnd);
+ }
+
+ inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd)
+ {
+ return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), pDest, pDestEnd);
+ }
+
+ inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd)
+ {
+ return DecodePart(reinterpret_cast<const char*&>(pSrc), reinterpret_cast<const char*>(pSrcEnd), pDest, pDestEnd);
+ }
+
+ inline bool DecodePart(const char*& pSrc, const char* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+ {
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char*&>(pDest), reinterpret_cast<char*&>(pDestEnd));
+ }
+
+ inline bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+ {
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char*&>(pDest), reinterpret_cast<char*&>(pDestEnd));
+ }
+
+ inline bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+ {
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char*&>(pDest), reinterpret_cast<char*&>(pDestEnd));
+ }
+ #endif
+
+ #if EA_CHAR8_UNIQUE && EA_WCHAR_UNIQUE
+ inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char16_t*&>(pDest), reinterpret_cast<char16_t*>(pDestEnd));
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(pSrc, pSrcEnd, reinterpret_cast<char32_t*&>(pDest), reinterpret_cast<char32_t*>(pDestEnd));
+ #endif
+ }
+
+ inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd)
+ {
+ #if (EA_WCHAR_SIZE == 2)
+ return DecodePart(reinterpret_cast<const char16_t*&>(pSrc), reinterpret_cast<const char16_t*>(pSrcEnd), reinterpret_cast<char*&>(pDest), reinterpret_cast<char*>(pDestEnd));
+ #elif (EA_WCHAR_SIZE == 4)
+ return DecodePart(reinterpret_cast<const char32_t*&>(pSrc), reinterpret_cast<const char32_t*>(pSrcEnd), reinterpret_cast<char*&>(pDest), reinterpret_cast<char*>(pDestEnd));
+ #endif
+ }
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // 'char traits' functionality
+ //
+ inline char CharToLower(char c)
+ { return (char)tolower((uint8_t)c); }
+
+ template<typename T>
+ inline T CharToLower(T c)
+ { if((unsigned)c <= 0xff) return (T)tolower((uint8_t)c); return c; }
+
+
+ inline char CharToUpper(char c)
+ { return (char)toupper((uint8_t)c); }
+
+ template<typename T>
+ inline T CharToUpper(T c)
+ { if((unsigned)c <= 0xff) return (T)toupper((uint8_t)c); return c; }
+
+
+ template <typename T>
+ int Compare(const T* p1, const T* p2, size_t n)
+ {
+ for(; n > 0; ++p1, ++p2, --n)
+ {
+ if(*p1 != *p2)
+ return (static_cast<typename make_unsigned<T>::type>(*p1) <
+ static_cast<typename make_unsigned<T>::type>(*p2)) ? -1 : 1;
+ }
+ return 0;
+ }
+
+ inline int Compare(const char* p1, const char* p2, size_t n)
+ {
+ return memcmp(p1, p2, n);
+ }
+
+
+ template <typename T>
+ inline int CompareI(const T* p1, const T* p2, size_t n)
+ {
+ for(; n > 0; ++p1, ++p2, --n)
+ {
+ const T c1 = CharToLower(*p1);
+ const T c2 = CharToLower(*p2);
+
+ if(c1 != c2)
+ return (static_cast<typename make_unsigned<T>::type>(c1) <
+ static_cast<typename make_unsigned<T>::type>(c2)) ? -1 : 1;
+ }
+ return 0;
+ }
+
+
+ template<typename T>
+ inline const T* Find(const T* p, T c, size_t n)
+ {
+ for(; n > 0; --n, ++p)
+ {
+ if(*p == c)
+ return p;
+ }
+
+ return NULL;
+ }
+
+ inline const char* Find(const char* p, char c, size_t n)
+ {
+ return (const char*)memchr(p, c, n);
+ }
+
+
+ template<typename T>
+ inline EA_CPP14_CONSTEXPR size_t CharStrlen(const T* p)
+ {
+ const auto* pCurrent = p;
+ while(*pCurrent)
+ ++pCurrent;
+ return (size_t)(pCurrent - p);
+ }
+
+
+ template <typename T>
+ inline T* CharStringUninitializedCopy(const T* pSource, const T* pSourceEnd, T* pDestination)
+ {
+ memmove(pDestination, pSource, (size_t)(pSourceEnd - pSource) * sizeof(T));
+ return pDestination + (pSourceEnd - pSource);
+ }
+
+
+ template <typename T>
+ const T* CharTypeStringFindEnd(const T* pBegin, const T* pEnd, T c)
+ {
+ const T* pTemp = pEnd;
+ while(--pTemp >= pBegin)
+ {
+ if(*pTemp == c)
+ return pTemp;
+ }
+
+ return pEnd;
+ }
+
+
+ template <typename T>
+ const T* CharTypeStringRSearch(const T* p1Begin, const T* p1End,
+ const T* p2Begin, const T* p2End)
+ {
+ // Test for zero length strings, in which case we have a match or a failure,
+ // but the return value is the same either way.
+ if((p1Begin == p1End) || (p2Begin == p2End))
+ return p1Begin;
+
+ // Test for a pattern of length 1.
+ if((p2Begin + 1) == p2End)
+ return CharTypeStringFindEnd(p1Begin, p1End, *p2Begin);
+
+ // Test for search string length being longer than string length.
+ if((p2End - p2Begin) > (p1End - p1Begin))
+ return p1End;
+
+ // General case.
+ const T* pSearchEnd = (p1End - (p2End - p2Begin) + 1);
+ const T* pCurrent1;
+ const T* pCurrent2;
+
+ while(pSearchEnd != p1Begin)
+ {
+ // Search for the last occurrence of *p2Begin.
+ pCurrent1 = CharTypeStringFindEnd(p1Begin, pSearchEnd, *p2Begin);
+ if(pCurrent1 == pSearchEnd) // If the first char of p2 wasn't found,
+ return p1End; // then we immediately have failure.
+
+			// In this case, *pCurrent1 == *p2Begin. So compare the rest.
+ pCurrent2 = p2Begin;
+ while(*pCurrent1++ == *pCurrent2++)
+ {
+ if(pCurrent2 == p2End)
+ return (pCurrent1 - (p2End - p2Begin));
+ }
+
+ // A smarter algorithm might know to subtract more than just one,
+ // but in most cases it won't make much difference anyway.
+ --pSearchEnd;
+ }
+
+ return p1End;
+ }
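+
+	// A hedged sketch of what the search above computes (placeholder data): the start of
+	// the last occurrence of [p2Begin, p2End) within [p1Begin, p1End), or p1End if absent.
+	//
+	//     const char haystack[] = "abcabc";
+	//     const char needle[]   = "abc";
+	//     const char* pos = CharTypeStringRSearch(haystack, haystack + 6, needle, needle + 3);
+	//     // pos == haystack + 3, i.e. the second "abc".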
+
+
+ template <typename T>
+ inline const T* CharTypeStringFindFirstOf(const T* p1Begin, const T* p1End, const T* p2Begin, const T* p2End)
+ {
+ for (; p1Begin != p1End; ++p1Begin)
+ {
+ for (const T* pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if (*p1Begin == *pTemp)
+ return p1Begin;
+ }
+ }
+ return p1End;
+ }
+
+
+ template <typename T>
+ inline const T* CharTypeStringRFindFirstNotOf(const T* p1RBegin, const T* p1REnd, const T* p2Begin, const T* p2End)
+ {
+ for (; p1RBegin != p1REnd; --p1RBegin)
+ {
+ const T* pTemp;
+ for (pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if (*(p1RBegin - 1) == *pTemp)
+ break;
+ }
+ if (pTemp == p2End)
+ return p1RBegin;
+ }
+ return p1REnd;
+ }
+
+
+ template <typename T>
+ inline const T* CharTypeStringFindFirstNotOf(const T* p1Begin, const T* p1End, const T* p2Begin, const T* p2End)
+ {
+ for (; p1Begin != p1End; ++p1Begin)
+ {
+ const T* pTemp;
+ for (pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if (*p1Begin == *pTemp)
+ break;
+ }
+ if (pTemp == p2End)
+ return p1Begin;
+ }
+ return p1End;
+ }
+
+
+ template <typename T>
+ inline const T* CharTypeStringRFindFirstOf(const T* p1RBegin, const T* p1REnd, const T* p2Begin, const T* p2End)
+ {
+ for (; p1RBegin != p1REnd; --p1RBegin)
+ {
+ for (const T* pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if (*(p1RBegin - 1) == *pTemp)
+ return p1RBegin;
+ }
+ }
+ return p1REnd;
+ }
+
+
+ template <typename T>
+ inline const T* CharTypeStringRFind(const T* pRBegin, const T* pREnd, const T c)
+ {
+ while (pRBegin > pREnd)
+ {
+ if (*(pRBegin - 1) == c)
+ return pRBegin;
+ --pRBegin;
+ }
+ return pREnd;
+ }
+
+
+ inline char* CharStringUninitializedFillN(char* pDestination, size_t n, const char c)
+ {
+ if(n) // Some compilers (e.g. GCC 4.3+) generate a warning (which can't be disabled) if you call memset with a size of 0.
+ memset(pDestination, (uint8_t)c, (size_t)n);
+ return pDestination + n;
+ }
+
+ template<typename T>
+ inline T* CharStringUninitializedFillN(T* pDestination, size_t n, const T c)
+ {
+ T * pDest = pDestination;
+ const T* const pEnd = pDestination + n;
+ while(pDest < pEnd)
+ *pDest++ = c;
+ return pDestination + n;
+ }
+
+
+ inline char* CharTypeAssignN(char* pDestination, size_t n, char c)
+ {
+ if(n) // Some compilers (e.g. GCC 4.3+) generate a warning (which can't be disabled) if you call memset with a size of 0.
+ return (char*)memset(pDestination, c, (size_t)n);
+ return pDestination;
+ }
+
+ template<typename T>
+ inline T* CharTypeAssignN(T* pDestination, size_t n, T c)
+ {
+ T* pDest = pDestination;
+ const T* const pEnd = pDestination + n;
+ while(pDest < pEnd)
+ *pDest++ = c;
+ return pDestination;
+ }
+} // namespace eastl
+
+#endif // EASTL_CHAR_TRAITS_H
diff --git a/include/EASTL/internal/config.h b/include/EASTL/internal/config.h
new file mode 100644
index 0000000..530bbc8
--- /dev/null
+++ b/include/EASTL/internal/config.h
@@ -0,0 +1,1877 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_CONFIG_H
+#define EASTL_INTERNAL_CONFIG_H
+
+
+///////////////////////////////////////////////////////////////////////////////
+// ReadMe
+//
+// This is the EASTL configuration file. All configurable parameters of EASTL
+// are controlled through this file. However, all the settings here can be
+// manually overridden by the user. There are three ways for a user to override
+// the settings in this file:
+//
+// - Simply edit this file.
+// - Define EASTL_USER_CONFIG_HEADER.
+// - Predefine individual defines (e.g. EASTL_ASSERT).
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_USER_CONFIG_HEADER
+//
+// This allows the user to define a header file to be #included before the
+// EASTL config.h contents are compiled. A primary use of this is to override
+// the contents of this config.h file. Note that all the settings below in
+// this file are user-overridable.
+//
+// Example usage:
+// #define EASTL_USER_CONFIG_HEADER "MyConfigOverrides.h"
+// #include <EASTL/vector.h>
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef EASTL_USER_CONFIG_HEADER
+ #include EASTL_USER_CONFIG_HEADER
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EABASE_DISABLED
+//
+// The user can disable EABase usage and manually supply the configuration
+// via defining EASTL_EABASE_DISABLED and defining the appropriate entities
+// globally or via the above EASTL_USER_CONFIG_HEADER.
+//
+// Example usage:
+// #define EASTL_EABASE_DISABLED
+// #include <EASTL/vector.h>
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_EABASE_DISABLED
+ #include <EABase/eabase.h>
+#endif
+#include <EABase/eahave.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VERSION
+//
+// We more or less follow the conventional EA packaging approach to versioning
+// here. A primary distinction is that minor versions are defined as two
+// digit entities (e.g. ".03") instead of minimal digit entities (e.g. ".3"). The logic
+// here is that the value is a counter and not a floating point fraction.
+// Note that the major version doesn't have leading zeros.
+//
+// Example version strings:
+// "0.91.00" // Major version 0, minor version 91, patch version 0.
+// "1.00.00" // Major version 1, minor and patch version 0.
+// "3.10.02" // Major version 3, minor version 10, patch version 02.
+//    "12.03.01"   // Major version 12, minor version 03, patch version 01.
+//
+// Example usage:
+// printf("EASTL version: %s", EASTL_VERSION);
+// printf("EASTL version: %d.%d.%d", EASTL_VERSION_N / 10000 % 100, EASTL_VERSION_N / 100 % 100, EASTL_VERSION_N % 100);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VERSION
+ #define EASTL_VERSION "3.17.06"
+ #define EASTL_VERSION_N 31706
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+//
+// Defined as 1 or undefined.
+// Implements support for the definition of EA_COMPILER_NO_STANDARD_CPP_LIBRARY for the case
+// of using EABase versions prior to the addition of its EA_COMPILER_NO_STANDARD_CPP_LIBRARY support.
+//
+#if !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY)
+ #if defined(EA_PLATFORM_ANDROID)
+ // Disabled because EA's eaconfig/android_config/android_sdk packages currently
+ // don't support linking STL libraries. Perhaps we can figure out what linker arguments
+ // are needed for an app so we can manually specify them and then re-enable this code.
+ //
+ //#include <android/api-level.h>
+ //
+ //#if (__ANDROID_API__ < 9) // Earlier versions of Android provide no std C++ STL implementation.
+ #define EA_COMPILER_NO_STANDARD_CPP_LIBRARY 1
+ //#endif
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_NOEXCEPT
+//
+// Defined as a macro. Provided here for backward compatibility with older
+// EABase versions prior to 2.00.40 that don't yet define it themselves.
+//
+#if !defined(EA_NOEXCEPT)
+ #define EA_NOEXCEPT
+ #define EA_NOEXCEPT_IF(predicate)
+ #define EA_NOEXCEPT_EXPR(expression) false
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_CPP14_CONSTEXPR
+//
+// Defined as constexpr when a C++14 compiler is present. Defines it as nothing
+// when using a C++11 compiler.
+// C++14 relaxes the specification for constexpr such that it allows more
+// kinds of expressions. Since a C++11 compiler doesn't allow this, we need
+// to make a unique define for C++14 constexpr. This macro should be used only
+// when you are using it with code that specifically requires C++14 constexpr
+// functionality beyond the regular C++11 constexpr functionality.
+// http://en.wikipedia.org/wiki/C%2B%2B14#Relaxed_constexpr_restrictions
+//
+#if !defined(EA_CPP14_CONSTEXPR)
+ #if defined(EA_COMPILER_CPP14_ENABLED)
+ #define EA_CPP14_CONSTEXPR constexpr
+ #else
+ #define EA_CPP14_CONSTEXPR // not supported
+ #define EA_NO_CPP14_CONSTEXPR
+ #endif
+#endif
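+
+// Illustrative usage (a sketch, not code shipped by EASTL): a function whose body
+// uses a local variable and a loop inside constexpr, which only C++14 relaxed
+// constexpr permits, and so is marked EA_CPP14_CONSTEXPR rather than plain constexpr.
+//
+//     EA_CPP14_CONSTEXPR int Factorial(int n)
+//     {
+//         int result = 1;              // Mutable local state requires C++14 constexpr.
+//         for(int i = 2; i <= n; ++i)
+//             result *= i;
+//         return result;
+//     }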
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL namespace
+//
+// We define this so that users that #include this config file can reference
+// these namespaces without seeing any other files that happen to use them.
+///////////////////////////////////////////////////////////////////////////////
+
+/// EA Standard Template Library
+namespace eastl
+{
+ // Intentionally empty.
+}
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUG
+//
+// Defined as an integer >= 0. Default is 1 for debug builds and 0 for
+// release builds. This define is also a master switch for the default value
+// of some other settings.
+//
+// Example usage:
+// #if EASTL_DEBUG
+// ...
+// #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_DEBUG
+ #if defined(EA_DEBUG) || defined(_DEBUG)
+ #define EASTL_DEBUG 1
+ #else
+ #define EASTL_DEBUG 0
+ #endif
+#endif
+
+// Developer debug. Helps EASTL developers assert EASTL is coded correctly.
+// Normally disabled for users since it validates internal things and not user things.
+#ifndef EASTL_DEV_DEBUG
+ #define EASTL_DEV_DEBUG 0
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUGPARAMS_LEVEL
+//
+// EASTL_DEBUGPARAMS_LEVEL controls what debug information is passed through to
+// the allocator by default.
+// This value may be defined by the user; if not, it defaults to 2 for
+// EASTL_DEBUG builds and to 0 otherwise.
+//
+// 0 - no debug information is passed through to allocator calls.
+// 1 - 'name' is passed through to allocator calls.
+// 2 - 'name', __FILE__, and __LINE__ are passed through to allocator calls.
+//
+// This parameter mirrors the equivalent parameter in the CoreAllocator package.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_DEBUGPARAMS_LEVEL
+ #if EASTL_DEBUG
+ #define EASTL_DEBUGPARAMS_LEVEL 2
+ #else
+ #define EASTL_DEBUGPARAMS_LEVEL 0
+ #endif
+#endif
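+
+// Illustrative sketch of what each level forwards to the allocator. The allocate
+// signatures shown are hypothetical and only indicate the shape of the debug data
+// passed through; see the allocator requirements later in this file.
+//
+//     EASTL_DEBUGPARAMS_LEVEL == 0:  allocate(n)                               // No debug info passed.
+//     EASTL_DEBUGPARAMS_LEVEL == 1:  allocate(n, "EASTL vector")               // Name only.
+//     EASTL_DEBUGPARAMS_LEVEL == 2:  allocate(n, "EASTL vector", __FILE__, __LINE__)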
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DLL
+//
+// Defined as 0 or 1. The default is dependent on the definition of EA_DLL.
+// If EA_DLL is defined, then EASTL_DLL is 1, else EASTL_DLL is 0.
+// EA_DLL is a define that controls DLL builds within the EAConfig build system.
+// EASTL_DLL controls whether EASTL is built and used as a DLL.
+// Normally you wouldn't do such a thing, but there are use cases for it,
+// particularly in the case of embedding C++ into C# applications.
+//
+#ifndef EASTL_DLL
+ #if defined(EA_DLL)
+ #define EASTL_DLL 1
+ #else
+ #define EASTL_DLL 0
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_IF_NOT_DLL
+//
+// Utility to include expressions only for static builds.
+//
+#ifndef EASTL_IF_NOT_DLL
+ #if EASTL_DLL
+ #define EASTL_IF_NOT_DLL(x)
+ #else
+ #define EASTL_IF_NOT_DLL(x) x
+ #endif
+#endif
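+
+// Illustrative usage (a sketch, not taken from EASTL itself): emit a declaration
+// only in static (non-DLL) builds, where it would be problematic to export.
+//
+//     struct Example
+//     {
+//         EASTL_IF_NOT_DLL(static const int kValue = 16;)
+//     };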
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_API
+//
+// This is used to label functions as DLL exports under Microsoft platforms.
+// If EA_DLL is defined, then the user is building EASTL as a DLL and EASTL's
+// non-templated functions will be exported. EASTL template functions are not
+// labelled as EASTL_API (and are thus not exported in a DLL build). This is
+// because it's not possible (or at least unsafe) to implement inline templated
+// functions in a DLL.
+//
+// Example usage of EASTL_API:
+// EASTL_API int someVariable = 10; // Export someVariable in a DLL build.
+//
+// struct EASTL_API SomeClass{ // Export SomeClass and its member functions in a DLL build.
+// EASTL_LOCAL void PrivateMethod(); // Not exported.
+// };
+//
+// EASTL_API void SomeFunction(); // Export SomeFunction in a DLL build.
+//
+//
+#if defined(EA_DLL) && !defined(EASTL_DLL)
+ #define EASTL_DLL 1
+#endif
+
+#ifndef EASTL_API // If the build file hasn't already defined this to be dllexport...
+ #if EASTL_DLL
+ #if defined(_MSC_VER)
+ #define EASTL_API __declspec(dllimport)
+ #define EASTL_LOCAL
+ #elif defined(__CYGWIN__)
+ #define EASTL_API __attribute__((dllimport))
+ #define EASTL_LOCAL
+ #elif (defined(__GNUC__) && (__GNUC__ >= 4))
+ #define EASTL_API __attribute__ ((visibility("default")))
+ #define EASTL_LOCAL __attribute__ ((visibility("hidden")))
+ #else
+ #define EASTL_API
+ #define EASTL_LOCAL
+ #endif
+ #else
+ #define EASTL_API
+ #define EASTL_LOCAL
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EASTDC_API
+//
+// This is used for importing EAStdC functions into EASTL, possibly via a DLL import.
+//
+#ifndef EASTL_EASTDC_API
+ #if EASTL_DLL
+ #if defined(_MSC_VER)
+ #define EASTL_EASTDC_API __declspec(dllimport)
+ #define EASTL_EASTDC_LOCAL
+ #elif defined(__CYGWIN__)
+ #define EASTL_EASTDC_API __attribute__((dllimport))
+ #define EASTL_EASTDC_LOCAL
+ #elif (defined(__GNUC__) && (__GNUC__ >= 4))
+ #define EASTL_EASTDC_API __attribute__ ((visibility("default")))
+ #define EASTL_EASTDC_LOCAL __attribute__ ((visibility("hidden")))
+ #else
+ #define EASTL_EASTDC_API
+ #define EASTL_EASTDC_LOCAL
+ #endif
+ #else
+ #define EASTL_EASTDC_API
+ #define EASTL_EASTDC_LOCAL
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EASTDC_VSNPRINTF
+//
+// Defined as 0 or 1. By default it is 1.
+//
+// When enabled EASTL uses EAStdC's Vsnprintf function directly instead of
+// having the user provide a global Vsnprintf8/16/32 function. The benefit
+// of this is that it will allow EASTL to just link to EAStdC's Vsnprintf
+// without the user doing anything. The downside is that any users who aren't
+// already using EAStdC will either need to now depend on EAStdC or globally
+// define this property to be 0 and simply provide functions that have the same
+// names. See the usage of EASTL_EASTDC_VSNPRINTF in string.h for more info.
+//
+#if !defined(EASTL_EASTDC_VSNPRINTF)
+ #define EASTL_EASTDC_VSNPRINTF 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NAME_ENABLED / EASTL_NAME / EASTL_NAME_VAL
+//
+// Used to wrap debug string names. In a release build, the definition
+// goes away. These are present to avoid release build compiler warnings
+// and to make code simpler.
+//
+// Example usage of EASTL_NAME:
+//     // pName will be defined away in a release build and thus prevent compiler warnings.
+// void allocator::set_name(const char* EASTL_NAME(pName))
+// {
+// #if EASTL_NAME_ENABLED
+// mpName = pName;
+// #endif
+// }
+//
+// Example usage of EASTL_NAME_VAL:
+// // "xxx" is defined to NULL in a release build.
+// vector<T, Allocator>::vector(const allocator_type& allocator = allocator_type(EASTL_NAME_VAL("xxx")));
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_NAME_ENABLED
+ #define EASTL_NAME_ENABLED EASTL_DEBUG
+#endif
+
+#ifndef EASTL_NAME
+ #if EASTL_NAME_ENABLED
+ #define EASTL_NAME(x) x
+ #define EASTL_NAME_VAL(x) x
+ #else
+ #define EASTL_NAME(x)
+ #define EASTL_NAME_VAL(x) ((const char*)NULL)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEFAULT_NAME_PREFIX
+//
+// Defined as a string literal. Defaults to "EASTL".
+// This define is used as the default name for EASTL where such a thing is
+// referenced in EASTL. For example, if the user doesn't specify an allocator
+// name for their deque, it is named "EASTL deque". However, you can override
+// this to say "SuperBaseball deque" by changing EASTL_DEFAULT_NAME_PREFIX.
+//
+// Example usage (which is simply taken from how deque.h uses this define):
+// #ifndef EASTL_DEQUE_DEFAULT_NAME
+// #define EASTL_DEQUE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " deque"
+// #endif
+//
+#ifndef EASTL_DEFAULT_NAME_PREFIX
+ #define EASTL_DEFAULT_NAME_PREFIX "EASTL"
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ASSERT_ENABLED
+//
+// Defined as 0 or non-zero. Default is same as EASTL_DEBUG.
+// If EASTL_ASSERT_ENABLED is non-zero, then asserts will be executed via
+// the assertion mechanism.
+//
+// Example usage:
+// #if EASTL_ASSERT_ENABLED
+// EASTL_ASSERT(v.size() > 17);
+// #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERT_ENABLED
+ #define EASTL_ASSERT_ENABLED EASTL_DEBUG
+#endif
+
+// Developer assert. Helps EASTL developers assert EASTL is coded correctly.
+// Normally disabled for users since it validates internal things and not user things.
+#ifndef EASTL_DEV_ASSERT_ENABLED
+ #define EASTL_DEV_ASSERT_ENABLED EASTL_DEV_DEBUG
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+//
+// Defined as 0 or non-zero. Default is same as EASTL_ASSERT_ENABLED.
+// This is like EASTL_ASSERT_ENABLED, except it is for empty container
+// references. Sometimes people like to be able to take a reference to
+// the front of the container, but not use it if the container is empty.
+// In practice it's often easier and more efficient to do this than to write
+// extra code to check if the container is empty.
+//
+// NOTE: If this is enabled, EASTL_ASSERT_ENABLED must also be enabled
+//
+// Example usage:
+// template <typename T, typename Allocator>
+// inline typename vector<T, Allocator>::reference
+// vector<T, Allocator>::front()
+// {
+// #if EASTL_ASSERT_ENABLED
+// EASTL_ASSERT(mpEnd > mpBegin);
+// #endif
+//
+// return *mpBegin;
+// }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ #define EASTL_EMPTY_REFERENCE_ASSERT_ENABLED EASTL_ASSERT_ENABLED
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// SetAssertionFailureFunction
+//
+// Allows the user to set a custom assertion failure mechanism.
+//
+// Example usage:
+// void Assert(const char* pExpression, void* pContext);
+// SetAssertionFailureFunction(Assert, this);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERTION_FAILURE_DEFINED
+ #define EASTL_ASSERTION_FAILURE_DEFINED
+
+ namespace eastl
+ {
+ typedef void (*EASTL_AssertionFailureFunction)(const char* pExpression, void* pContext);
+ EASTL_API void SetAssertionFailureFunction(EASTL_AssertionFailureFunction pFunction, void* pContext);
+
+ // These are the internal default functions that implement asserts.
+ EASTL_API void AssertionFailure(const char* pExpression);
+ EASTL_API void AssertionFailureFunctionDefault(const char* pExpression, void* pContext);
+ }
+#endif
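+
+// Illustrative sketch of a custom failure handler (user code; MyLogger, gLogger and
+// the LogError function are hypothetical names used only for this example):
+//
+//     static void MyAssertHandler(const char* pExpression, void* pContext)
+//     {
+//         MyLogger* pLogger = static_cast<MyLogger*>(pContext); // pContext is whatever was passed to SetAssertionFailureFunction.
+//         pLogger->LogError("EASTL assertion failed: %s", pExpression);
+//         EASTL_DEBUG_BREAK();
+//     }
+//
+//     eastl::SetAssertionFailureFunction(MyAssertHandler, &gLogger);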
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ASSERT
+//
+// Assertion macro. Can be overridden by user with a different value.
+//
+// Example usage:
+// EASTL_ASSERT(intVector.size() < 100);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERT
+ #if EASTL_ASSERT_ENABLED
+ #define EASTL_ASSERT(expression) \
+ EA_DISABLE_VC_WARNING(4127) \
+ do { \
+ EA_ANALYSIS_ASSUME(expression); \
+ (void)((expression) || (eastl::AssertionFailure(#expression), 0)); \
+ } while (0) \
+ EA_RESTORE_VC_WARNING()
+ #else
+ #define EASTL_ASSERT(expression)
+ #endif
+#endif
+
+// Developer assert. Helps EASTL developers assert EASTL is coded correctly.
+// Normally disabled for users since it validates internal things and not user things.
+#ifndef EASTL_DEV_ASSERT
+ #if EASTL_DEV_ASSERT_ENABLED
+ #define EASTL_DEV_ASSERT(expression) \
+ EA_DISABLE_VC_WARNING(4127) \
+ do { \
+ EA_ANALYSIS_ASSUME(expression); \
+ (void)((expression) || (eastl::AssertionFailure(#expression), 0)); \
+ } while(0) \
+ EA_RESTORE_VC_WARNING()
+ #else
+ #define EASTL_DEV_ASSERT(expression)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ASSERT_MSG
+//
+// Example usage:
+// EASTL_ASSERT_MSG(false, "detected error condition!");
+//
+///////////////////////////////////////////////////////////////////////////////
+#ifndef EASTL_ASSERT_MSG
+ #if EASTL_ASSERT_ENABLED
+ #define EASTL_ASSERT_MSG(expression, message) \
+ EA_DISABLE_VC_WARNING(4127) \
+ do { \
+ EA_ANALYSIS_ASSUME(expression); \
+ (void)((expression) || (eastl::AssertionFailure(message), 0)); \
+ } while (0) \
+ EA_RESTORE_VC_WARNING()
+ #else
+ #define EASTL_ASSERT_MSG(expression, message)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FAIL_MSG
+//
+// Failure macro. Can be overridden by user with a different value.
+//
+// Example usage:
+//     EASTL_FAIL_MSG("detected error condition!");
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FAIL_MSG
+ #if EASTL_ASSERT_ENABLED
+ #define EASTL_FAIL_MSG(message) (eastl::AssertionFailure(message))
+ #else
+ #define EASTL_FAIL_MSG(message)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_CT_ASSERT / EASTL_CT_ASSERT_NAMED
+//
+// EASTL_CT_ASSERT is a macro for compile time assertion checks, useful for
+// validating *constant* expressions. The advantage over using EASTL_ASSERT
+// is that errors are caught at compile time instead of runtime.
+//
+// Example usage:
+// EASTL_CT_ASSERT(sizeof(uint32_t) == 4);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#define EASTL_CT_ASSERT(expression) static_assert(expression, #expression)
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_CT_ASSERT_MSG
+//
+// EASTL_CT_ASSERT_MSG is a macro for compile time assertion checks, useful for
+// validating *constant* expressions. The advantage over using EASTL_ASSERT
+// is that errors are caught at compile time instead of runtime.
+// The message must be a string literal.
+//
+// Example usage:
+// EASTL_CT_ASSERT_MSG(sizeof(uint32_t) == 4, "The size of uint32_t must be 4.");
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#define EASTL_CT_ASSERT_MSG(expression, message) static_assert(expression, message)
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUG_BREAK / EASTL_DEBUG_BREAK_OVERRIDE
+//
+// This function causes an app to immediately stop under the debugger.
+// It is implemented as a macro in order to allow stopping at the site
+// of the call.
+//
+// EASTL_DEBUG_BREAK_OVERRIDE allows one to define EASTL_DEBUG_BREAK directly.
+// This is useful in cases where you desire to disable EASTL_DEBUG_BREAK
+// but do not wish to (or cannot) define a custom void function() to replace
+// EASTL_DEBUG_BREAK callsites.
+//
+// Example usage:
+// EASTL_DEBUG_BREAK();
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_DEBUG_BREAK_OVERRIDE
+ #ifndef EASTL_DEBUG_BREAK
+ #if defined(_MSC_VER) && (_MSC_VER >= 1300)
+ #define EASTL_DEBUG_BREAK() __debugbreak() // This is a compiler intrinsic which will map to appropriate inlined asm for the platform.
+ #elif (defined(EA_PROCESSOR_ARM) && !defined(EA_PROCESSOR_ARM64)) && defined(__APPLE__)
+ #define EASTL_DEBUG_BREAK() asm("trap")
+ #elif defined(EA_PROCESSOR_ARM64) && defined(__APPLE__)
+ #include <signal.h>
+ #include <unistd.h>
+ #define EASTL_DEBUG_BREAK() kill( getpid(), SIGINT )
+ #elif defined(EA_PROCESSOR_ARM64) && defined(__GNUC__)
+ #define EASTL_DEBUG_BREAK() asm("brk 10")
+ #elif defined(EA_PROCESSOR_ARM) && defined(__GNUC__)
+ #define EASTL_DEBUG_BREAK() asm("BKPT 10") // The 10 is arbitrary. It's just a unique id.
+ #elif defined(EA_PROCESSOR_ARM) && defined(__ARMCC_VERSION)
+ #define EASTL_DEBUG_BREAK() __breakpoint(10)
+ #elif defined(EA_PROCESSOR_POWERPC) // Generic PowerPC.
+ #define EASTL_DEBUG_BREAK() asm(".long 0") // This triggers an exception by executing opcode 0x00000000.
+ #elif (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) && defined(EA_ASM_STYLE_INTEL)
+ #define EASTL_DEBUG_BREAK() { __asm int 3 }
+ #elif (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) && (defined(EA_ASM_STYLE_ATT) || defined(__GNUC__))
+ #define EASTL_DEBUG_BREAK() asm("int3")
+ #else
+ void EASTL_DEBUG_BREAK(); // User must define this externally.
+ #endif
+ #else
+ void EASTL_DEBUG_BREAK(); // User must define this externally.
+ #endif
+#else
+ #ifndef EASTL_DEBUG_BREAK
+ #if EASTL_DEBUG_BREAK_OVERRIDE == 1
+ // define an empty callable to satisfy the call site.
+ #define EASTL_DEBUG_BREAK ([]{})
+ #else
+ #define EASTL_DEBUG_BREAK EASTL_DEBUG_BREAK_OVERRIDE
+ #endif
+ #else
+ #error EASTL_DEBUG_BREAK is already defined yet you would like to override it. Please ensure no other headers are already defining EASTL_DEBUG_BREAK before this header (config.h) is included
+ #endif
+#endif
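+
+// Illustrative usage (user-side sketch): the override must be defined before any
+// EASTL header is included, typically via build settings.
+//
+//     #define EASTL_DEBUG_BREAK_OVERRIDE 1                  // Makes EASTL_DEBUG_BREAK a no-op lambda.
+//     // or
+//     #define EASTL_DEBUG_BREAK_OVERRIDE MyDebugBreakFunc   // Route breaks to your own function (hypothetical name).
+//     #include <EASTL/vector.h>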
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALLOCATOR_COPY_ENABLED
+//
+// Defined as 0 or 1. Default is 0 (disabled) until some future date.
+// If enabled (1) then container operator= copies the allocator from the
+// source container. It ideally should be set to enabled but for backwards
+// compatibility with older versions of EASTL it is currently set to 0.
+// Regardless of whether this value is 0 or 1, containers copy construct
+// or copy assign allocators.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ALLOCATOR_COPY_ENABLED
+ #define EASTL_ALLOCATOR_COPY_ENABLED 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FIXED_SIZE_TRACKING_ENABLED
+//
+// Defined as an integer >= 0. Default is same as EASTL_DEBUG.
+// If EASTL_FIXED_SIZE_TRACKING_ENABLED is enabled, then fixed
+// containers in debug builds track the max count of objects
+// that have been in the container. This allows for the tuning
+// of fixed container sizes to their minimum required size.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FIXED_SIZE_TRACKING_ENABLED
+ #define EASTL_FIXED_SIZE_TRACKING_ENABLED EASTL_DEBUG
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_RTTI_ENABLED
+//
+// Defined as 0 or 1. Default is 1 if RTTI is supported by the compiler.
+// This define exists so that we can use some dynamic_cast operations in the
+// code without warning. dynamic_cast is only used if the user specifically
+// refers to it; EASTL won't do dynamic_cast behind your back.
+//
+// Example usage:
+// #if EASTL_RTTI_ENABLED
+// pChildClass = dynamic_cast<ChildClass*>(pParentClass);
+// #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_RTTI_ENABLED
+ // The VC++ default Standard Library (Dinkumware) disables major parts of RTTI
+ // (e.g. type_info) if exceptions are disabled, even if RTTI itself is enabled.
+ // _HAS_EXCEPTIONS is defined by Dinkumware to 0 or 1 (disabled or enabled).
+ #if defined(EA_COMPILER_NO_RTTI) || (defined(_MSC_VER) && defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && !(defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS))
+ #define EASTL_RTTI_ENABLED 0
+ #else
+ #define EASTL_RTTI_ENABLED 1
+ #endif
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EXCEPTIONS_ENABLED
+//
+// Defined as 0 or 1. Default is to follow what the compiler settings are.
+// The user can predefine EASTL_EXCEPTIONS_ENABLED to 0 or 1; however, if the
+// compiler is set to disable exceptions then EASTL_EXCEPTIONS_ENABLED is
+// forced to a value of 0 regardless of the user predefine.
+//
+// Note that we do not enable EASTL exceptions by default if the compiler
+// has exceptions enabled. To enable EASTL_EXCEPTIONS_ENABLED you need to
+// manually set it to 1.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#if !defined(EASTL_EXCEPTIONS_ENABLED) || ((EASTL_EXCEPTIONS_ENABLED == 1) && defined(EA_COMPILER_NO_EXCEPTIONS))
+ #define EASTL_EXCEPTIONS_ENABLED 0
+#endif
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STRING_OPT_XXXX
+//
+// Enables some options / optimizations that cause the string class to behave
+// slightly differently from the C++ standard basic_string. These are
+// options whereby you can improve performance by avoiding operations that
+// in practice may never occur for you.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STRING_OPT_EXPLICIT_CTORS
+ // Defined as 0 or 1. Default is 0.
+	// Defines if we should make constructors explicit where the C++
+	// standard string does not. The disadvantage of enabling explicit constructors
+	// is that you can no longer write: string s = "hello"; and must instead write
+	// string s("hello");. The advantage of enabling explicit constructors is that
+	// they prevent silent conversions, which can impede performance if the user
+	// isn't paying attention.
+ // C++ standard string ctors are not explicit.
+ #define EASTL_STRING_OPT_EXPLICIT_CTORS 0
+#endif
+
+#ifndef EASTL_STRING_OPT_LENGTH_ERRORS
+ // Defined as 0 or 1. Default is equal to EASTL_EXCEPTIONS_ENABLED.
+ // Defines if we check for string values going beyond kMaxSize
+	// (a very large value) and throw exceptions if so.
+ // C++ standard strings are expected to do such checks.
+ #define EASTL_STRING_OPT_LENGTH_ERRORS EASTL_EXCEPTIONS_ENABLED
+#endif
+
+#ifndef EASTL_STRING_OPT_RANGE_ERRORS
+ // Defined as 0 or 1. Default is equal to EASTL_EXCEPTIONS_ENABLED.
+ // Defines if we check for out-of-bounds references to string
+ // positions and throw exceptions if so. Well-behaved code shouldn't
+	// reference out-of-bounds positions and so shouldn't need these checks.
+ // C++ standard strings are expected to do such range checks.
+ #define EASTL_STRING_OPT_RANGE_ERRORS EASTL_EXCEPTIONS_ENABLED
+#endif
+
+#ifndef EASTL_STRING_OPT_ARGUMENT_ERRORS
+ // Defined as 0 or 1. Default is 0.
+ // Defines if we check for NULL ptr arguments passed to string
+ // functions by the user and throw exceptions if so. Well-behaved code
+ // shouldn't pass bad arguments and so shouldn't need these checks.
+ // Also, some users believe that strings should check for NULL pointers
+ // in all their arguments and do no-ops if so. This is very debatable.
+ // C++ standard strings are not required to check for such argument errors.
+ #define EASTL_STRING_OPT_ARGUMENT_ERRORS 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_BITSET_SIZE_T
+//
+// Defined as 0 or 1. Default is 1.
+// Controls whether bitset uses size_t or eastl_size_t.
+//
+#ifndef EASTL_BITSET_SIZE_T
+ #define EASTL_BITSET_SIZE_T 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_INT128_SUPPORTED
+//
+// Defined as 0 or 1. Indicates whether a native 128-bit integer type is available.
+//
+#ifndef EASTL_INT128_SUPPORTED
+ #if defined(__SIZEOF_INT128__) || (defined(EA_COMPILER_INTMAX_SIZE) && (EA_COMPILER_INTMAX_SIZE >= 16))
+ #define EASTL_INT128_SUPPORTED 1
+ #else
+ #define EASTL_INT128_SUPPORTED 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEFAULT_ALLOCATOR_ALIGNED_ALLOCATIONS_SUPPORTED
+//
+// Defined as 0 or 1.
+// Tells if you can use the default EASTL allocator to do aligned allocations,
+// which for most uses tells if you can store aligned objects in containers
+// that use default allocators. It turns out that when built as a DLL for
+// some platforms, EASTL doesn't have a way to do aligned allocations, as it
+// doesn't have a heap that supports it. There is a way to work around this
+// with dynamically defined allocators, but that's currently a to-do.
+//
+#ifndef EASTL_DEFAULT_ALLOCATOR_ALIGNED_ALLOCATIONS_SUPPORTED
+ #if EASTL_DLL
+ #define EASTL_DEFAULT_ALLOCATOR_ALIGNED_ALLOCATIONS_SUPPORTED 0
+ #else
+ #define EASTL_DEFAULT_ALLOCATOR_ALIGNED_ALLOCATIONS_SUPPORTED 1
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_INT128_DEFINED
+//
+// Defined as 0 or 1.
+// Specifies whether eastl_int128_t/eastl_uint128_t have been typedef'd yet.
+//
+#ifndef EASTL_INT128_DEFINED
+ #if EASTL_INT128_SUPPORTED
+ #define EASTL_INT128_DEFINED 1
+
+ #if defined(__SIZEOF_INT128__) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)
+ typedef __int128_t eastl_int128_t;
+ typedef __uint128_t eastl_uint128_t;
+ #else
+ typedef int128_t eastl_int128_t; // The EAStdC package defines an EA::StdC::int128_t and uint128_t type,
+ typedef uint128_t eastl_uint128_t; // though they are currently within the EA::StdC namespace.
+ #endif
+ #endif
+#endif
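+
+// Illustrative usage (a sketch): use the typedefs above where a 128-bit integer
+// is needed, guarded on availability.
+//
+//     #if EASTL_INT128_SUPPORTED
+//         eastl_uint128_t bigValue = eastl_uint128_t(1) << 100;
+//     #endif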
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_BITSET_WORD_TYPE_DEFAULT / EASTL_BITSET_WORD_SIZE_DEFAULT
+//
+// Defined as an unsigned integral type whose size is a power of two, usually uint32_t or uint64_t.
+// Specifies the word type that bitset should use internally to implement
+// storage. By default this is the platform register word size, but there
+// may be reasons to use a different value.
+//
+// Defines the integral data type used by bitset by default.
+// You can override this default on a bitset-by-bitset case by supplying a
+// custom bitset WordType template parameter.
+//
+// The C++ standard specifies that the std::bitset word type be unsigned long,
+// but that isn't necessarily the most efficient data type for the given platform.
+// We can follow the standard and be potentially less efficient or we can do what
+// is more efficient but less like the C++ std::bitset.
+//
+#if !defined(EASTL_BITSET_WORD_TYPE_DEFAULT)
+ #if defined(EASTL_BITSET_WORD_SIZE) // EASTL_BITSET_WORD_SIZE is deprecated, but we temporarily support the ability for the user to specify it. Use EASTL_BITSET_WORD_TYPE_DEFAULT instead.
+ #if (EASTL_BITSET_WORD_SIZE == 4)
+ #define EASTL_BITSET_WORD_TYPE_DEFAULT uint32_t
+ #define EASTL_BITSET_WORD_SIZE_DEFAULT 4
+ #else
+ #define EASTL_BITSET_WORD_TYPE_DEFAULT uint64_t
+ #define EASTL_BITSET_WORD_SIZE_DEFAULT 8
+ #endif
+ #elif (EA_PLATFORM_WORD_SIZE == 16) // EA_PLATFORM_WORD_SIZE is defined in EABase.
+ #define EASTL_BITSET_WORD_TYPE_DEFAULT uint128_t
+ #define EASTL_BITSET_WORD_SIZE_DEFAULT 16
+ #elif (EA_PLATFORM_WORD_SIZE == 8)
+ #define EASTL_BITSET_WORD_TYPE_DEFAULT uint64_t
+ #define EASTL_BITSET_WORD_SIZE_DEFAULT 8
+ #elif (EA_PLATFORM_WORD_SIZE == 4)
+ #define EASTL_BITSET_WORD_TYPE_DEFAULT uint32_t
+ #define EASTL_BITSET_WORD_SIZE_DEFAULT 4
+ #else
+ #define EASTL_BITSET_WORD_TYPE_DEFAULT uint16_t
+ #define EASTL_BITSET_WORD_SIZE_DEFAULT 2
+ #endif
+#endif
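+
+// Illustrative usage (a sketch, relying on the WordType template parameter described
+// above): override the word type for an individual bitset rather than globally.
+//
+//     eastl::bitset<128>           defaultWords;  // Uses EASTL_BITSET_WORD_TYPE_DEFAULT.
+//     eastl::bitset<128, uint32_t> smallWords;    // Explicit per-bitset word type.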
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIST_SIZE_CACHE
+//
+// Defined as 0 or 1. Default is 1. Changed from 0 in version 1.16.01.
+// If defined as 1, the list and slist containers (and possibly any additional
+// containers as well) keep a member mSize (or similar) variable which allows
+// the size() member function to execute in constant time (a.k.a. O(1)).
+// There are debates on both sides as to whether it is better to have this
+// cached value or not, as having it entails some cost (memory and code).
+// To consider: Make list size caching an optional template parameter.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_LIST_SIZE_CACHE
+ #define EASTL_LIST_SIZE_CACHE 1
+#endif
+
+#ifndef EASTL_SLIST_SIZE_CACHE
+ #define EASTL_SLIST_SIZE_CACHE 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MAX_STACK_USAGE
+//
+// Defined as an integer greater than zero. Default is 4000.
+// There are some places in EASTL where temporary objects are put on the
+// stack. A common example of this is in the implementation of container
+// swap functions whereby a temporary copy of the container is made.
+// There is a problem, however, if the size of the item created on the stack
+// is very large. This can happen with fixed-size containers, for example.
+// The EASTL_MAX_STACK_USAGE define specifies the maximum amount of memory
+// (in bytes) that the given platform/compiler will safely allow on the stack.
+// Platforms such as Windows will generally allow larger values than embedded
+// systems or console machines, but it is usually a good idea to stick with
+// a max usage value that is portable across all platforms, lest the user be
+// surprised when something breaks as it is ported to another platform.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_MAX_STACK_USAGE
+ #define EASTL_MAX_STACK_USAGE 4000
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VA_COPY_ENABLED
+//
+// Defined as 0 or 1. Default is 1 for compilers that need it, 0 for others.
+// Some compilers on some platforms implement va_list whereby its contents
+// are destroyed upon usage, even if passed by value to another function.
+// With these compilers you can use va_copy to save and restore a va_list.
+// Known compiler/platforms that destroy va_list contents upon usage include:
+// CodeWarrior on PowerPC
+// GCC on x86-64
+// However, va_copy is part of the C99 standard and not part of earlier C and
+// C++ standards. So not all compilers support it. VC++ doesn't support va_copy,
+// but it turns out that VC++ doesn't usually need it on the platforms it supports,
+// and va_copy can usually be implemented via memcpy(va_list, va_list) with VC++.
+//
+// Example usage:
+// void Function(va_list arguments)
+// {
+// #if EASTL_VA_COPY_ENABLED
+// va_list argumentsCopy;
+// va_copy(argumentsCopy, arguments);
+// #endif
+// <use arguments or argumentsCopy>
+// #if EASTL_VA_COPY_ENABLED
+// va_end(argumentsCopy);
+// #endif
+// }
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VA_COPY_ENABLED
+ #if ((defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__)) && (!defined(__i386__) || defined(__x86_64__)) && !defined(__ppc__) && !defined(__PPC__) && !defined(__PPC64__)
+ #define EASTL_VA_COPY_ENABLED 1
+ #else
+ #define EASTL_VA_COPY_ENABLED 0
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_OPERATOR_EQUALS_OTHER_ENABLED
+//
+// Defined as 0 or 1. Default is 0 until such day that it's deemed safe.
+// When enabled, enables operator= for other char types, e.g. for code
+// like this:
+// eastl::string8 s8;
+// eastl::string16 s16;
+// s8 = s16;
+// This option is considered experimental, and may exist as such for an
+// indefinite amount of time.
+//
+#if !defined(EASTL_OPERATOR_EQUALS_OTHER_ENABLED)
+ #define EASTL_OPERATOR_EQUALS_OTHER_ENABLED 0
+#endif
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIST_PROXY_ENABLED
+//
+#if !defined(EASTL_LIST_PROXY_ENABLED)
+	// GCC with -fstrict-aliasing has bugs (or undocumented functionality) in its
+	// __may_alias__ implementation; the compiler gets confused about function signatures.
+ // VC8 (1400) doesn't need the proxy because it has built-in smart debugging capabilities.
+ #if defined(EASTL_DEBUG) && !defined(__GNUC__) && (!defined(_MSC_VER) || (_MSC_VER < 1400))
+ #define EASTL_LIST_PROXY_ENABLED 1
+ #define EASTL_LIST_PROXY_MAY_ALIAS EASTL_MAY_ALIAS
+ #else
+ #define EASTL_LIST_PROXY_ENABLED 0
+ #define EASTL_LIST_PROXY_MAY_ALIAS
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STD_ITERATOR_CATEGORY_ENABLED
+//
+// Defined as 0 or 1. Default is 0.
+// If defined as non-zero, EASTL iterator categories (iterator.h's input_iterator_tag,
+// forward_iterator_tag, etc.) are defined to be those from std C++ in the std
+// namespace. The reason for wanting to enable such a feature is that it allows
+// EASTL containers and algorithms to work with std STL containers and algorithms.
+// The default value was changed from 1 to 0 in EASTL 1.13.03, January 11, 2012.
+// The reason for the change was that almost nobody was taking advantage of it and
+// it was slowing down compile times for some compilers quite a bit due to them
+// having a lot of headers behind <iterator>.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STD_ITERATOR_CATEGORY_ENABLED
+ #define EASTL_STD_ITERATOR_CATEGORY_ENABLED 0
+#endif
+
+#if EASTL_STD_ITERATOR_CATEGORY_ENABLED
+ #define EASTL_ITC_NS std
+#else
+ #define EASTL_ITC_NS eastl
+#endif
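+
+// Illustrative usage (a sketch): iterator code can stay neutral about which
+// namespace the category tags come from by always referring to EASTL_ITC_NS.
+//
+//     struct MyIterator
+//     {
+//         typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; // std:: or eastl:: depending on the setting.
+//         // ...
+//     };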
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATION_ENABLED
+//
+// Defined as an integer >= 0. Default is to be equal to EASTL_DEBUG.
+// If nonzero, then a certain amount of automatic runtime validation is done.
+// Runtime validation is not considered the same thing as asserting that user
+// input values are valid. Validation refers to internal consistency checking
+// of the validity of containers and their iterators. Validation checking is
+// something that often involves significantly more than basic assertion
+// checking, and it may sometimes be desirable to disable it.
+// This macro would generally be used internally by EASTL.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATION_ENABLED
+ #define EASTL_VALIDATION_ENABLED EASTL_DEBUG
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATE_COMPARE
+//
+// Defined as EASTL_ASSERT or defined away. Default is EASTL_ASSERT if EASTL_VALIDATION_ENABLED is enabled.
+// This is used to validate user-supplied comparison functions, particularly for sorting purposes.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATE_COMPARE_ENABLED
+ #define EASTL_VALIDATE_COMPARE_ENABLED EASTL_VALIDATION_ENABLED
+#endif
+
+#if EASTL_VALIDATE_COMPARE_ENABLED
+ #define EASTL_VALIDATE_COMPARE EASTL_ASSERT
+#else
+ #define EASTL_VALIDATE_COMPARE(expression)
+#endif
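+
+// Illustrative sketch of the kind of check sorting code can make (the iterator
+// names are hypothetical): a strict weak ordering must never report both a < b
+// and b < a.
+//
+//     if(compare(*itA, *itB))
+//     {
+//         EASTL_VALIDATE_COMPARE(!compare(*itB, *itA)); // Detects an invalid user comparison function.
+//         // ... move/swap elements ...
+//     }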
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATE_INTRUSIVE_LIST
+//
+// Defined as an integral value >= 0. Controls the amount of automatic validation
+// done by intrusive_list. A value of 0 means no automatic validation is done.
+// As of this writing, EASTL_VALIDATE_INTRUSIVE_LIST defaults to 0, as it makes
+// the intrusive_list_node become a non-POD, which may be an issue for some code.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATE_INTRUSIVE_LIST
+ #define EASTL_VALIDATE_INTRUSIVE_LIST 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FORCE_INLINE
+//
+// Defined as a "force inline" expression or defined away.
+// You generally don't need to use forced inlining with the Microsoft and
+// Metrowerks compilers, but you may need it with the GCC compiler (any version).
+//
+// Example usage:
+// template <typename T, typename Allocator>
+// EASTL_FORCE_INLINE typename vector<T, Allocator>::size_type
+// vector<T, Allocator>::size() const
+// { return mpEnd - mpBegin; }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FORCE_INLINE
+ #define EASTL_FORCE_INLINE EA_FORCE_INLINE
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MAY_ALIAS
+//
+// Defined as a macro that wraps the GCC may_alias attribute. This attribute
+// has no significance for VC++ because VC++ doesn't support the concept of
+// strict aliasing. Users should avoid writing code that breaks strict
+// aliasing rules; EASTL_MAY_ALIAS is for cases with no alternative.
+//
+// Example usage:
+// uint32_t value EASTL_MAY_ALIAS;
+//
+// Example usage:
+// typedef uint32_t EASTL_MAY_ALIAS value_type;
+// value_type value;
+//
+#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 303) && !defined(EA_COMPILER_RVCT)
+ #define EASTL_MAY_ALIAS __attribute__((__may_alias__))
+#else
+ #define EASTL_MAY_ALIAS
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIKELY / EASTL_UNLIKELY
+//
+// Defined as a macro which gives a hint to the compiler for branch
+// prediction. GCC gives you the ability to manually give a hint to
+// the compiler about the result of a comparison, though it's often
+// best to compile shipping code with profiling feedback under both
+// GCC (-fprofile-arcs) and VC++ (/LTCG:PGO, etc.). However, there
+// are times when you feel very sure that a boolean expression will
+// usually evaluate to either true or false and can help the compiler
+// by using an explicit directive...
+//
+// Example usage:
+// if(EASTL_LIKELY(a == 0)) // Tell the compiler that a will usually equal 0.
+// { ... }
+//
+// Example usage:
+// if(EASTL_UNLIKELY(a == 0)) // Tell the compiler that a will usually not equal 0.
+// { ... }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_LIKELY
+ #if defined(__GNUC__) && (__GNUC__ >= 3)
+ #define EASTL_LIKELY(x) __builtin_expect(!!(x), true)
+ #define EASTL_UNLIKELY(x) __builtin_expect(!!(x), false)
+ #else
+ #define EASTL_LIKELY(x) (x)
+ #define EASTL_UNLIKELY(x) (x)
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STD_TYPE_TRAITS_AVAILABLE
+//
+// Defined as 0 or 1; default is based on auto-detection.
+// Specifies whether Standard C++11 <type_traits> support exists.
+// Sometimes the auto-detection below fails to work properly and the
+// user needs to override it. Does not define whether the compiler provides
+// built-in compiler type trait support (e.g. __is_abstract()), as some
+// compilers will have EASTL_STD_TYPE_TRAITS_AVAILABLE = 0 but still have
+// built-in type trait support.
+//
+#ifndef EASTL_STD_TYPE_TRAITS_AVAILABLE
+ /* Disabled because we don't currently need it.
+ #if defined(_MSC_VER) && (_MSC_VER >= 1500) // VS2008 or later
+ #pragma warning(push, 0)
+ #include <yvals.h>
+ #pragma warning(pop)
+ #if ((defined(_HAS_TR1) && _HAS_TR1) || _MSC_VER >= 1700) // VS2012 (1700) and later has built-in type traits support.
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 1
+ #include <type_traits>
+ #else
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 0
+ #endif
+
+ #elif defined(EA_COMPILER_CLANG) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003) && !defined(__GCCXML__)) && !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY)
+ #include <cstddef> // This will define __GLIBCXX__ if using GNU's libstdc++ and _LIBCPP_VERSION if using clang's libc++.
+
+ #if defined(EA_COMPILER_CLANG) && !defined(EA_PLATFORM_APPLE) // As of v3.0.0, Apple's clang doesn't support type traits.
+ // http://clang.llvm.org/docs/LanguageExtensions.html#checking_type_traits
+ // Clang has some built-in compiler trait support. This support doesn't currently
+ // directly cover all our type_traits, though the C++ Standard Library that's used
+ // with clang could fill that in.
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 1
+ #endif
+
+ #if !defined(EASTL_STD_TYPE_TRAITS_AVAILABLE)
+ #if defined(_LIBCPP_VERSION) // This is defined by clang's libc++.
+ #include <type_traits>
+
+ #elif defined(__GLIBCXX__) && (__GLIBCXX__ >= 20090124) // It's not clear if this is the oldest version that has type traits; probably it isn't.
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 1
+
+ #if defined(__GXX_EXPERIMENTAL_CXX0X__) // To do: Update this test to include conforming C++11 implementations.
+ #include <type_traits>
+ #else
+ #include <tr1/type_traits>
+ #endif
+ #else
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 0
+ #endif
+ #endif
+
+ #elif defined(__MSL_CPP__) && (__MSL_CPP__ >= 0x8000) // CodeWarrior compiler.
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 0
+ // To do: Implement support for this (via modifying the EASTL type
+		// traits headers), as CodeWarrior provides this.
+ #else
+ #define EASTL_STD_TYPE_TRAITS_AVAILABLE 0
+ #endif
+ */
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE
+//
+// Defined as 0 or 1; default is based on auto-detection.
+// Specifies whether the compiler provides built-in compiler type trait support
+// (e.g. __is_abstract()). Does not specify any details about which traits
+// are available or what their standards-compliance is. Nevertheless this is a
+// useful macro identifier for our type traits implementation.
+//
+#ifndef EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE
+ #if defined(_MSC_VER) && (_MSC_VER >= 1500) // VS2008 or later
+ #pragma warning(push, 0)
+ #include <yvals.h>
+ #pragma warning(pop)
+ #if ((defined(_HAS_TR1) && _HAS_TR1) || _MSC_VER >= 1700) // VS2012 (1700) and later has built-in type traits support.
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
+ #else
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0
+ #endif
+ #elif defined(EA_COMPILER_CLANG) && defined(__APPLE__) && defined(_CXXCONFIG) // Apple clang but with GCC's libstdc++.
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0
+ #elif defined(EA_COMPILER_CLANG)
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
+ #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003) && !defined(__GCCXML__)
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
+ #elif defined(__MSL_CPP__) && (__MSL_CPP__ >= 0x8000) // CodeWarrior compiler.
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1
+ #else
+ #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_RESET_ENABLED
+//
+// Defined as 0 or 1; default is 0.
+// The reset_lose_memory function works the same as reset, as described below.
+//
+// Specifies whether the container reset functionality is enabled. If enabled
+// then <container>::reset forgets its memory, otherwise it acts as the clear
+// function. The reset function is potentially dangerous, as it (by design)
+// causes containers to not free their memory.
+// This option has no applicability to the bitset::reset function, as bitset
+// isn't really a container. Also it has no applicability to the smart pointer
+// wrappers (e.g. intrusive_ptr).
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_RESET_ENABLED
+ #define EASTL_RESET_ENABLED 0
+#endif
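+
+// Illustrative usage (a sketch; the arena allocator is hypothetical): forget the
+// container's memory when that memory is owned and reclaimed elsewhere, such as a
+// linear arena that is about to be cleared wholesale.
+//
+//     eastl::vector<int> v(arenaAllocator);
+//     v.push_back(1);
+//     v.reset_lose_memory(); // v no longer references the arena memory.
+//     arena.Clear();         // Safe: v will not later try to free into the arena.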
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MINMAX_ENABLED
+//
+// Defined as 0 or 1; default is 1.
+// Specifies whether the min and max algorithms are available.
+// It may be useful to disable the min and max algorithms because sometimes
+// #defines for min and max exist which would collide with EASTL min and max.
+// Note that there are already alternative versions of min and max in EASTL
+// with the min_alt and max_alt functions. You can use these without colliding
+// with min/max macros that may exist.
+//
+///////////////////////////////////////////////////////////////////////////////
+#ifndef EASTL_MINMAX_ENABLED
+ #define EASTL_MINMAX_ENABLED 1
+#endif
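+
+// Illustrative usage (a sketch): min_alt/max_alt behave like min/max but cannot
+// collide with min/max macros that other headers (e.g. windows.h) may define.
+//
+//     const int smaller = eastl::min_alt(a, b);
+//     const int larger  = eastl::max_alt(a, b);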
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NOMINMAX
+//
+// Defined as 0 or 1; default is 1.
+// MSVC++ has #defines for min/max which collide with the min/max algorithm
+// declarations. If EASTL_NOMINMAX is defined as 1, then we undefine min and
+// max if they are #defined by an external library. This allows our min and
+// max definitions in algorithm.h to work as expected. An alternative to
+// the enabling of EASTL_NOMINMAX is to #define NOMINMAX in your project
+// settings if you are compiling for Windows.
+// Note that this does not control the availability of the EASTL min and max
+// algorithms; the EASTL_MINMAX_ENABLED configuration parameter does that.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_NOMINMAX
+ #define EASTL_NOMINMAX 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STD_CPP_ONLY
+//
+// Defined as 0 or 1; default is 0.
+// Disables the use of compiler language extensions. We use compiler language
+// extensions only in the case that they provide some benefit that can't be
+// had any other practical way. But sometimes the compiler is set to disable
+// language extensions or sometimes one compiler's preprocessor is used to generate
+// code for another compiler, and so it's necessary to disable language extension usage.
+//
+// Example usage:
+// #if defined(_MSC_VER) && !EASTL_STD_CPP_ONLY
+// enum : size_type { npos = container_type::npos }; // Microsoft extension which results in significantly smaller debug symbols.
+// #else
+// static const size_type npos = container_type::npos;
+// #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STD_CPP_ONLY
+ #define EASTL_STD_CPP_ONLY 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NO_RVALUE_REFERENCES
+//
+// Defined as 0 or 1.
+// This is the same as EABase EA_COMPILER_NO_RVALUE_REFERENCES except that it
+// follows the convention of being always defined, as 0 or 1.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_NO_RVALUE_REFERENCES)
+ #if defined(EA_COMPILER_NO_RVALUE_REFERENCES)
+ #define EASTL_NO_RVALUE_REFERENCES 1
+ #else
+ #define EASTL_NO_RVALUE_REFERENCES 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MOVE_SEMANTICS_ENABLED
+//
+// Defined as 0 or 1.
+// If enabled then C++11-like functionality with rvalue references and move
+// operations is enabled.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_MOVE_SEMANTICS_ENABLED)
+ #if EASTL_NO_RVALUE_REFERENCES // If the compiler doesn't support rvalue references or EASTL is configured to disable them...
+ #define EASTL_MOVE_SEMANTICS_ENABLED 0
+ #else
+ #define EASTL_MOVE_SEMANTICS_ENABLED 1
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VARIADIC_TEMPLATES_ENABLED
+//
+// Defined as 0 or 1.
+// If enabled then C++11-like functionality with variadic templates is enabled.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_VARIADIC_TEMPLATES_ENABLED)
+ #if defined(EA_COMPILER_NO_VARIADIC_TEMPLATES) // If the compiler doesn't support variadic templates
+ #define EASTL_VARIADIC_TEMPLATES_ENABLED 0
+ #else
+ #define EASTL_VARIADIC_TEMPLATES_ENABLED 1
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VARIABLE_TEMPLATES_ENABLED
+//
+// Defined as 0 or 1.
+// If enabled then C++11-like functionality with variable templates is enabled.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_VARIABLE_TEMPLATES_ENABLED)
+ #if((EABASE_VERSION_N < 20605) || defined(EA_COMPILER_NO_VARIABLE_TEMPLATES))
+ #define EASTL_VARIABLE_TEMPLATES_ENABLED 0
+ #else
+ #define EASTL_VARIABLE_TEMPLATES_ENABLED 1
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_INLINE_VARIABLE_ENABLED
+//
+// Defined as 0 or 1.
+// If enabled then C++17-like functionality with inline variable is enabled.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_INLINE_VARIABLE_ENABLED)
+ #if((EABASE_VERSION_N < 20707) || defined(EA_COMPILER_NO_INLINE_VARIABLES))
+ #define EASTL_INLINE_VARIABLE_ENABLED 0
+ #else
+ #define EASTL_INLINE_VARIABLE_ENABLED 1
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_CPP17_INLINE_VARIABLE
+//
+// Used to prefix a variable as inline when C++17 inline variables are available
+// Usage: EASTL_CPP17_INLINE_VARIABLE constexpr bool type_trait_v = type_trait::value
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_CPP17_INLINE_VARIABLE)
+ #if EASTL_INLINE_VARIABLE_ENABLED
+ #define EASTL_CPP17_INLINE_VARIABLE inline
+ #else
+ #define EASTL_CPP17_INLINE_VARIABLE
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_HAVE_CPP11_TYPE_TRAITS
+//
+// Defined as 0 or 1.
+// This is the same as EABase EA_HAVE_CPP11_TYPE_TRAITS except that it
+// follows the convention of being always defined, as 0 or 1. Note that this
+// identifies if the Standard Library has C++11 type traits and not if EASTL
+// has its equivalents to C++11 type traits.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_HAVE_CPP11_TYPE_TRAITS)
+ // To do: Change this to use the EABase implementation once we have a few months of testing
+ // of this and we are sure it works right. Do this at some point after ~January 2014.
+ #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+
+ #define EASTL_HAVE_CPP11_TYPE_TRAITS 1
+ #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007) // Prior versions of libstdc++ have incomplete support for C++11 type traits.
+ #define EASTL_HAVE_CPP11_TYPE_TRAITS 1
+ #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1)
+ #define EASTL_HAVE_CPP11_TYPE_TRAITS 1
+ #else
+ #define EASTL_HAVE_CPP11_TYPE_TRAITS 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS undef
+//
+// We need to revise this macro to be undefined in some cases, in case the user
+// isn't using an updated EABase.
+///////////////////////////////////////////////////////////////////////////////
+#if defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403) // It may in fact be supported by 4.01 or 4.02 but we don't have compilers to test with.
+ #if defined(EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS)
+ #undef EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NO_RANGE_BASED_FOR_LOOP
+//
+// Defined as 0 or 1.
+// This is the same as EABase EA_COMPILER_NO_RANGE_BASED_FOR_LOOP except that it
+// follows the convention of being always defined, as 0 or 1.
+///////////////////////////////////////////////////////////////////////////////
+#if !defined(EASTL_NO_RANGE_BASED_FOR_LOOP)
+ #if defined(EA_COMPILER_NO_RANGE_BASED_FOR_LOOP)
+ #define EASTL_NO_RANGE_BASED_FOR_LOOP 1
+ #else
+ #define EASTL_NO_RANGE_BASED_FOR_LOOP 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALIGN_OF
+//
+// Determines the alignment of a type.
+//
+// Example usage:
+// size_t alignment = EASTL_ALIGN_OF(int);
+//
+///////////////////////////////////////////////////////////////////////////////
+#ifndef EASTL_ALIGN_OF
+ #define EASTL_ALIGN_OF alignof
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// eastl_size_t
+//
+// Defined as an unsigned integer type, usually either size_t or uint32_t.
+// Defaults to size_t to match std STL unless the user specifies to use
+// uint32_t explicitly via the EASTL_SIZE_T_32BIT define.
+//
+// Example usage:
+// eastl_size_t n = intVector.size();
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_SIZE_T_32BIT // Defines whether EASTL_SIZE_T uses uint32_t/int32_t as opposed to size_t/ssize_t.
+ #define EASTL_SIZE_T_32BIT 0 // This makes a difference on 64 bit platforms because they use a 64 bit size_t.
+#endif // By default we do the same thing as std STL and use size_t.
+
+#ifndef EASTL_SIZE_T
+ #if (EASTL_SIZE_T_32BIT == 0) || (EA_PLATFORM_WORD_SIZE == 4)
+ #include <stddef.h>
+ #define EASTL_SIZE_T size_t
+ #define EASTL_SSIZE_T intptr_t
+ #else
+ #define EASTL_SIZE_T uint32_t
+ #define EASTL_SSIZE_T int32_t
+ #endif
+#endif
+
+typedef EASTL_SIZE_T eastl_size_t; // Same concept as std::size_t.
+typedef EASTL_SSIZE_T eastl_ssize_t; // Signed version of eastl_size_t. Concept is similar to Posix's ssize_t.
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// AddRef / Release
+//
+// AddRef and Release are used for "intrusive" reference counting. By the term
+// "intrusive", we mean that the reference count is maintained by the object
+// and not by the user of the object. Given that an object implements reference
+// counting, the user of the object needs to be able to increment and decrement
+// that reference count. We do that via the venerable AddRef and Release functions
+// which the object must supply. These defines here allow us to specify the name
+// of the functions. They could just as well be defined to addref and delref or
+// IncRef and DecRef.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTLAddRef
+ #define EASTLAddRef AddRef
+#endif
+
+#ifndef EASTLRelease
+ #define EASTLRelease Release
+#endif
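+
+// Illustrative sketch (user code, not part of EASTL) of an intrusively
+// reference-counted class that supplies the functions named above, usable
+// for example with eastl::intrusive_ptr:
+//
+//     class Widget
+//     {
+//     public:
+//         int AddRef()  { return ++mRefCount; }
+//         int Release() { const int rc = --mRefCount; if(rc == 0) delete this; return rc; }
+//     private:
+//         int mRefCount = 0;
+//     };
+//
+//     eastl::intrusive_ptr<Widget> pWidget(new Widget); // Calls AddRef/Release as the pointer is copied/destroyed.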
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALLOCATOR_EXPLICIT_ENABLED
+//
+// Defined as 0 or 1. Default is 0 for now but ideally would be changed to
+// 1 some day. It's 0 because setting it to 1 breaks some existing code.
+// This option enables the allocator ctor to be explicit, which avoids
+// some undesirable silent conversions, especially with the string class.
+//
+// Example usage:
+// class allocator
+// {
+// public:
+// EASTL_ALLOCATOR_EXPLICIT allocator(const char* pName);
+// };
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ALLOCATOR_EXPLICIT_ENABLED
+ #define EASTL_ALLOCATOR_EXPLICIT_ENABLED 0
+#endif
+
+#if EASTL_ALLOCATOR_EXPLICIT_ENABLED
+ #define EASTL_ALLOCATOR_EXPLICIT explicit
+#else
+ #define EASTL_ALLOCATOR_EXPLICIT
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALLOCATOR_MIN_ALIGNMENT
+//
+// Defined as an integral power-of-2 that's >= 1.
+// Identifies the minimum alignment that EASTL should assume its allocators
+// use. There is code within EASTL that decides whether to do a Malloc or
+// MallocAligned call and it's typically better if it can use the Malloc call.
+// But this requires knowing what the minimum possible alignment is.
+#if !defined(EASTL_ALLOCATOR_MIN_ALIGNMENT)
+ #define EASTL_ALLOCATOR_MIN_ALIGNMENT EA_PLATFORM_MIN_MALLOC_ALIGNMENT
+#endif
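+
+// Illustrative sketch of the decision this enables (the helper function is
+// hypothetical, not EASTL's actual code; 'a' models the allocator interface
+// documented below):
+//
+//     void* AllocateBlock(EASTLAllocatorType& a, size_t n, size_t alignment)
+//     {
+//         if(alignment <= EASTL_ALLOCATOR_MIN_ALIGNMENT)
+//             return a.allocate(n);           // A plain allocation already satisfies the alignment.
+//         return a.allocate(n, alignment, 0); // Otherwise request aligned memory explicitly.
+//     }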
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT
+//
+// Identifies the minimum alignment that EASTL should assume system allocations
+// from malloc and new will have.
+#if !defined(EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT)
+ #if defined(EA_PLATFORM_MICROSOFT) || defined(EA_PLATFORM_APPLE)
+ #define EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT 16
+ #else
+ #define EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT (EA_PLATFORM_PTR_SIZE * 2)
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL allocator
+//
+// The EASTL allocator system allows you to redefine how memory is allocated
+// via some defines that are set up here. In the container code, memory is
+// allocated via macros which expand to whatever the user has them set to
+// expand to. Given that there are multiple allocator systems available,
+// this system allows you to configure it to use whatever system you want,
+// provided your system meets the requirements of this library.
+// The requirements are:
+//
+//    - Must be constructible via a const char* (name) parameter.
+// Some uses of allocators won't require this, however.
+// - Allocate a block of memory of size n and debug name string.
+// - Allocate a block of memory of size n, debug name string,
+// alignment a, and offset o.
+// - Free memory allocated via either of the allocation functions above.
+// - Provide a default allocator instance which can be used if the user
+// doesn't provide a specific one.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+// namespace eastl
+// {
+// class allocator
+// {
+// allocator(const char* pName = NULL);
+//
+// void* allocate(size_t n, int flags = 0);
+// void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0);
+// void deallocate(void* p, size_t n);
+//
+// const char* get_name() const;
+// void set_name(const char* pName);
+// };
+//
+// allocator* GetDefaultAllocator(); // This is used for anonymous allocations.
+// }
+
+#ifndef EASTLAlloc // To consider: Instead of calling through pAllocator, just go directly to operator new, since that's what allocator does.
+ #define EASTLAlloc(allocator, n) (allocator).allocate(n);
+#endif
+
+#ifndef EASTLAllocFlags // To consider: Instead of calling through pAllocator, just go directly to operator new, since that's what allocator does.
+ #define EASTLAllocFlags(allocator, n, flags) (allocator).allocate(n, flags);
+#endif
+
+#ifndef EASTLAllocAligned
+ #define EASTLAllocAligned(allocator, n, alignment, offset) (allocator).allocate((n), (alignment), (offset))
+#endif
+
+#ifndef EASTLAllocAlignedFlags
+ #define EASTLAllocAlignedFlags(allocator, n, alignment, offset, flags) (allocator).allocate((n), (alignment), (offset), (flags))
+#endif
+
+#ifndef EASTLFree
+ #define EASTLFree(allocator, p, size) (allocator).deallocate((void*)(p), (size)) // Important to cast to void* as p may be non-const.
+#endif
+
+#ifndef EASTLAllocatorType
+ #define EASTLAllocatorType eastl::allocator
+#endif
+
+#ifndef EASTLDummyAllocatorType
+ #define EASTLDummyAllocatorType eastl::dummy_allocator
+#endif
+
+#ifndef EASTLAllocatorDefault
+ // EASTLAllocatorDefault returns the default allocator instance. This is not a global
+ // allocator which implements all container allocations but is the allocator that is
+ // used when EASTL needs to allocate memory internally. There are very few cases where
+	// EASTL allocates memory internally, and in each of these cases it is for a sensible,
+	// documented reason.
+ #define EASTLAllocatorDefault eastl::GetDefaultAllocator
+#endif
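+
+// Illustrative sketch (not part of the original header) of how container internals route
+// allocations through the macros above; the sizes and the name are arbitrary examples.
+//
+//     EASTLAllocatorType alloc("MyContainer");
+//     void* p  = EASTLAlloc(alloc, 64);                // -> alloc.allocate(64)
+//     void* pa = EASTLAllocAligned(alloc, 64, 16, 0);  // -> alloc.allocate(64, 16, 0)
+//     EASTLFree(alloc, p,  64);
+//     EASTLFree(alloc, pa, 64);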
+
+
+/// EASTL_ALLOCATOR_DEFAULT_NAME
+///
+/// Defines a default allocator name in the absence of a user-provided name.
+///
+#ifndef EASTL_ALLOCATOR_DEFAULT_NAME
+ #define EASTL_ALLOCATOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX // Unless the user overrides something, this is "EASTL".
+#endif
+
+/// EASTL_USE_FORWARD_WORKAROUND
+///
+/// This is to workaround a compiler bug that we found in VS2013. Update 1 did not fix it.
+/// This should be fixed in a future release of VS2013 http://accentuable4.rssing.com/browser.php?indx=3511740&item=15696
+///
+#ifndef EASTL_USE_FORWARD_WORKAROUND
+ #if defined(_MSC_FULL_VER) && _MSC_FULL_VER == 180021005 || (defined(__EDG_VERSION__) && (__EDG_VERSION__ < 405))// VS2013 initial release
+ #define EASTL_USE_FORWARD_WORKAROUND 1
+ #else
+ #define EASTL_USE_FORWARD_WORKAROUND 0
+ #endif
+#endif
+
+
+/// EASTL_TUPLE_ENABLED
+/// EASTL tuple implementation depends on variadic template support
+#if EASTL_VARIADIC_TEMPLATES_ENABLED && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #define EASTL_TUPLE_ENABLED 1
+#else
+ #define EASTL_TUPLE_ENABLED 0
+#endif
+
+
+/// EASTL_FUNCTION_ENABLED
+///
+#ifndef EASTL_FUNCTION_ENABLED
+ #define EASTL_FUNCTION_ENABLED 1
+#endif
+
+
+/// EASTL_USER_LITERALS_ENABLED
+#ifndef EASTL_USER_LITERALS_ENABLED
+ #if defined(EA_COMPILER_CPP14_ENABLED)
+ #define EASTL_USER_LITERALS_ENABLED 1
+
+ // Disabling the Clang/GCC/MSVC warning about using user defined literals without a leading '_' as they are
+		// reserved for standard library usage.
+ EA_DISABLE_CLANG_WARNING(-Wuser-defined-literals)
+ EA_DISABLE_CLANG_WARNING(-Wreserved-user-defined-literal)
+ EA_DISABLE_GCC_WARNING(-Wliteral-suffix)
+ #ifdef _MSC_VER
+ #pragma warning(disable: 4455) // disable warning C4455: literal suffix identifiers that do not start with an underscore are reserved
+ #endif
+
+ #else
+ #define EASTL_USER_LITERALS_ENABLED 0
+ #endif
+#endif
+
+
+/// EASTL_INLINE_NAMESPACES_ENABLED
+#ifndef EASTL_INLINE_NAMESPACES_ENABLED
+ #if defined(EA_COMPILER_CPP14_ENABLED)
+ #define EASTL_INLINE_NAMESPACES_ENABLED 1
+ #else
+ #define EASTL_INLINE_NAMESPACES_ENABLED 0
+ #endif
+#endif
+
+
+/// EASTL_CORE_ALLOCATOR_ENABLED
+#ifndef EASTL_CORE_ALLOCATOR_ENABLED
+ #define EASTL_CORE_ALLOCATOR_ENABLED 0
+#endif
+
+/// EASTL_OPENSOURCE
+/// This is enabled when EASTL is being built in an "open source" mode, which is a mode that eliminates code
+/// dependencies on other technologies that have not been released publicly.
+/// EASTL_OPENSOURCE = 0 is the default.
+/// EASTL_OPENSOURCE = 1 eliminates dependencies on technologies that are not publicly available.
+///
+#ifndef EASTL_OPENSOURCE
+ #define EASTL_OPENSOURCE 0
+#endif
+
+
+/// EASTL_OPTIONAL_ENABLED
+#if defined(EA_COMPILER_MSVC_2012)
+ #define EASTL_OPTIONAL_ENABLED 0
+#elif defined(EA_COMPILER_MSVC_2013)
+ #define EASTL_OPTIONAL_ENABLED 0
+#elif defined(EA_COMPILER_MSVC_2015)
+ #define EASTL_OPTIONAL_ENABLED 1
+#elif EASTL_VARIADIC_TEMPLATES_ENABLED && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) && !defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS) && defined(EA_COMPILER_CPP11_ENABLED)
+ #define EASTL_OPTIONAL_ENABLED 1
+#else
+ #define EASTL_OPTIONAL_ENABLED 0
+#endif
+
+
+/// EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE
+#if defined(_MSC_VER) && (_MSC_VER >= 1913) // VS2017+
+ #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 1
+#elif defined(EA_COMPILER_CLANG)
+ #if !__is_identifier(__has_unique_object_representations)
+ #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 1
+ #else
+ #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 0
+ #endif
+#else
+ #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 0
+#endif
+
+
+/// EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR
+/// This feature define allows users to toggle the problematic eastl::pair implicit
+/// single element constructor.
+#ifndef EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR
+ #define EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR 0
+#endif
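+
+// Illustrative sketch (not part of the original header) of the constructor this define
+// toggles: a pair built from a single value, with the second member default-constructed.
+//
+//     eastl::pair<int, float> p(42);   // Only compiles when
+//                                      // EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR is 1.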
+
+/// EASTL_SYSTEM_BIG_ENDIAN_STATEMENT
+/// EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT
+/// These macros allow you to write endian-specific code as statements.
+/// This allows endian-specific code to be macro-expanded from within other macros.
+///
+#if defined(EA_SYSTEM_BIG_ENDIAN)
+ #define EASTL_SYSTEM_BIG_ENDIAN_STATEMENT(...) __VA_ARGS__
+#else
+ #define EASTL_SYSTEM_BIG_ENDIAN_STATEMENT(...)
+#endif
+
+#if defined(EA_SYSTEM_LITTLE_ENDIAN)
+ #define EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT(...) __VA_ARGS__
+#else
+ #define EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT(...)
+#endif
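+
+// Illustrative sketch (not part of the original header): exactly one of the statements
+// below survives preprocessing; SwapFromBig is a hypothetical byte-swapping helper.
+//
+//     EASTL_SYSTEM_LITTLE_ENDIAN_STATEMENT(value = SwapFromBig(value);)
+//     EASTL_SYSTEM_BIG_ENDIAN_STATEMENT(/* data is already big-endian; nothing to do */)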
+
+
+#endif // Header include guard
diff --git a/include/EASTL/internal/copy_help.h b/include/EASTL/internal/copy_help.h
new file mode 100644
index 0000000..e5fb2ab
--- /dev/null
+++ b/include/EASTL/internal/copy_help.h
@@ -0,0 +1,215 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_COPY_HELP_H
+#define EASTL_INTERNAL_COPY_HELP_H
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+#include <string.h> // memcpy, memcmp, memmove
+
+
+namespace eastl
+{
+ /// move / move_n / move_backward
+ /// copy / copy_n / copy_backward
+ ///
+ /// We want to optimize move, move_n, move_backward, copy, copy_backward, copy_n to do memmove operations
+ /// when possible.
+ ///
+ /// We could possibly use memcpy, though it has stricter overlap requirements than the move and copy
+ /// algorithms and would require a runtime if/else to choose it over memmove. In particular, memcpy
+ /// allows no range overlap at all, whereas move/copy allow output end overlap and move_backward/copy_backward
+	/// allow output begin overlap. Despite this, it might be useful to use memcpy on platforms where
+	/// memcpy is significantly faster than memmove, since in most cases the copy/move operation in fact
+ /// doesn't target overlapping memory and so memcpy would be usable.
+ ///
+ /// We can use memmove/memcpy if the following hold true:
+ /// InputIterator and OutputIterator are of the same type.
+ /// InputIterator and OutputIterator are of type contiguous_iterator_tag or simply are pointers (the two are virtually synonymous).
+ /// is_trivially_copyable<T>::value is true. i.e. the constructor T(const T& t) (or T(T&& t) if present) can be replaced by memmove(this, &t, sizeof(T))
+ ///
+ /// copy normally differs from move, but there is a case where copy is the same as move: when copy is
+ /// used with a move_iterator. We handle that case here by detecting that copy is being done with a
+ /// move_iterator and redirect it to move (which can take advantage of memmove/memcpy).
+ ///
+ /// The generic_iterator class is typically used for wrapping raw memory pointers so they can act like
+ /// formal iterators. Since pointers provide an opportunity for memmove/memcpy operations, we can
+	/// detect a generic iterator and use its wrapped type as a pointer if it happens to be one.
+
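+	// Illustrative sketch (not part of the original header) of when the memmove fast path
+	// described above can apply; the element types below are arbitrary examples.
+	//
+	//     int src[4] = {1, 2, 3, 4};
+	//     int dst[4];
+	//     eastl::copy(src, src + 4, dst);   // Pointers to a trivially copyable type:
+	//                                       // eligible to lower to a single memmove.
+	//
+	//     eastl::list<SomeClass> a, b;      // b assumed pre-sized to match a.
+	//     eastl::copy(a.begin(), a.end(), b.begin());  // Non-contiguous iterators: uses
+	//                                                  // the element-by-element loop.
+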
+	// Implementation for moving/copying both trivial and non-trivial data via a lesser iterator than random-access.
+ template <typename /*InputIteratorCategory*/, bool /*isMove*/, bool /*canMemmove*/>
+ struct move_and_copy_helper
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ for(; first != last; ++result, ++first)
+ *result = *first;
+ return result;
+ }
+ };
+
+	// Specialization for copying non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when it's a compile-time const.
+	// This specialization converts the random access InputIterator last-first to an integral type. There's no simple way for us to take advantage of a random access output iterator,
+ // as the range is specified by the input instead of the output, and distance(first, last) for a non-random-access iterator is potentially slow.
+ template <>
+ struct move_and_copy_helper<EASTL_ITC_NS::random_access_iterator_tag, false, false>
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::difference_type difference_type;
+
+ for(difference_type n = (last - first); n > 0; --n, ++first, ++result)
+ *result = *first;
+
+ return result;
+ }
+ };
+
+ // Specialization for moving non-trivial data via a lesser iterator than random-access.
+ template <typename InputIteratorCategory>
+ struct move_and_copy_helper<InputIteratorCategory, true, false>
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ for(; first != last; ++result, ++first)
+ *result = eastl::move(*first);
+ return result;
+ }
+ };
+
+	// Specialization for moving non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when it's a compile-time const.
+ template <>
+ struct move_and_copy_helper<EASTL_ITC_NS::random_access_iterator_tag, true, false>
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::difference_type difference_type;
+
+ for(difference_type n = (last - first); n > 0; --n, ++first, ++result)
+ *result = eastl::move(*first);
+
+ return result;
+ }
+ };
+
+ // Specialization for when we can use memmove/memcpy. See the notes above for what conditions allow this.
+ template <bool isMove>
+ struct move_and_copy_helper<EASTL_ITC_NS::random_access_iterator_tag, isMove, true>
+ {
+ template <typename T>
+ static T* move_or_copy(const T* first, const T* last, T* result)
+ {
+ if (EASTL_UNLIKELY(first == last))
+ return result;
+
+ // We could use memcpy here if there's no range overlap, but memcpy is rarely much faster than memmove.
+ return (T*)memmove(result, first, (size_t)((uintptr_t)last - (uintptr_t)first)) + (last - first);
+ }
+ };
+
+
+
+ template <bool isMove, typename InputIterator, typename OutputIterator>
+ inline OutputIterator move_and_copy_chooser(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IIC;
+ typedef typename eastl::iterator_traits<OutputIterator>::iterator_category OIC;
+ typedef typename eastl::iterator_traits<InputIterator>::value_type value_type_input;
+ typedef typename eastl::iterator_traits<OutputIterator>::value_type value_type_output;
+
+ const bool canBeMemmoved = eastl::is_trivially_copyable<value_type_output>::value &&
+ eastl::is_same<value_type_input, value_type_output>::value &&
+ (eastl::is_pointer<InputIterator>::value || eastl::is_same<IIC, eastl::contiguous_iterator_tag>::value) &&
+ (eastl::is_pointer<OutputIterator>::value || eastl::is_same<OIC, eastl::contiguous_iterator_tag>::value);
+
+		return eastl::move_and_copy_helper<IIC, isMove, canBeMemmoved>::move_or_copy(first, last, result); // Need to choose based on the input iterator tag and not the output iterator tag, because containers accept input ranges of iterator types different than self.
+ }
+
+
+ // We have a second layer of unwrap_iterator calls because the original iterator might be something like move_iterator<generic_iterator<int*> > (i.e. doubly-wrapped).
+ template <bool isMove, typename InputIterator, typename OutputIterator>
+ inline OutputIterator move_and_copy_unwrapper(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ return OutputIterator(eastl::move_and_copy_chooser<isMove>(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), eastl::unwrap_iterator(result))); // Have to convert to OutputIterator because result.base() could be a T*
+ }
+
+
+ /// move
+ ///
+ /// After this operation the elements in the moved-from range will still contain valid values of the
+ /// appropriate type, but not necessarily the same values as before the move.
+ /// Returns the end of the result range.
+ /// Note: When moving between containers, the dest range must be valid; this function doesn't resize containers.
+ /// Note: if result is within [first, last), move_backward must be used instead of move.
+ ///
+ /// Example usage:
+ /// eastl::move(myArray.begin(), myArray.end(), myDestArray.begin());
+ ///
+ /// Reference implementation:
+ /// template <typename InputIterator, typename OutputIterator>
+ /// OutputIterator move(InputIterator first, InputIterator last, OutputIterator result)
+ /// {
+ /// while(first != last)
+ /// *result++ = eastl::move(*first++);
+ /// return result;
+ /// }
+
+ template <typename InputIterator, typename OutputIterator>
+ inline OutputIterator move(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ return eastl::move_and_copy_unwrapper<true>(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), result);
+ }
+
+
+ /// copy
+ ///
+ /// Effects: Copies elements in the range [first, last) into the range [result, result + (last - first))
+ /// starting from first and proceeding to last. For each nonnegative integer n < (last - first),
+ /// performs *(result + n) = *(first + n).
+ ///
+ /// Returns: result + (last - first). That is, returns the end of the result. Note that this
+ /// is different from how memmove/memcpy work, as they return the beginning of the result.
+ ///
+ /// Requires: result shall not be in the range [first, last). But the end of the result range
+	/// may in fact be within the input range.
+ ///
+ /// Complexity: Exactly 'last - first' assignments.
+ ///
+ template <typename InputIterator, typename OutputIterator>
+ inline OutputIterator copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ const bool isMove = eastl::is_move_iterator<InputIterator>::value; EA_UNUSED(isMove);
+
+ return eastl::move_and_copy_unwrapper<isMove>(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), result);
+ }
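+
+	// Illustrative sketch (not part of the original header): copy driven by a move_iterator
+	// is redirected to the move path, as described in the notes at the top of this file.
+	//
+	//     eastl::vector<eastl::string> src(8), dst(8);
+	//     eastl::copy(eastl::make_move_iterator(src.begin()),
+	//                 eastl::make_move_iterator(src.end()),
+	//                 dst.begin());   // Elements are moved rather than copied.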
+} // namespace eastl
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/include/EASTL/internal/enable_shared.h b/include/EASTL/internal/enable_shared.h
new file mode 100644
index 0000000..ac5f072
--- /dev/null
+++ b/include/EASTL/internal/enable_shared.h
@@ -0,0 +1,83 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_ENABLE_SHARED_H
+#define EASTL_INTERNAL_ENABLE_SHARED_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+namespace eastl
+{
+
+ /// enable_shared_from_this
+ ///
+ /// This is a helper mixin class that allows you to make any class
+ /// export a shared_ptr instance that is associated with the class
+ /// instance. Any class that inherits from this class gets two functions:
+ /// shared_ptr<T> shared_from_this();
+ /// shared_ptr<T> shared_from_this() const;
+ /// If you call shared_from_this, you get back a shared_ptr that
+ /// refers to the class. A second call to shared_from_this returns
+ /// another shared_ptr that is shared with the first one.
+ ///
+	/// The not-so-obvious trick here (and one which is
+ /// not mentioned at all in the Boost documentation of their version
+ /// of this) is that the shared_ptr constructor detects that the
+ /// class has an enable_shared_from_this mixin and sets up this system
+ /// automatically for the user. This is done with template tricks.
+ ///
+ /// For some additional explanation, see the Boost documentation for
+ /// their description of their version of enable_shared_from_this.
+ ///
+ template <typename T>
+ class enable_shared_from_this
+ {
+ public:
+ shared_ptr<T> shared_from_this()
+ { return shared_ptr<T>(mWeakPtr); }
+
+ shared_ptr<const T> shared_from_this() const
+ { return shared_ptr<const T>(mWeakPtr); }
+
+ weak_ptr<T> weak_from_this()
+ { return mWeakPtr; }
+
+ weak_ptr<const T> weak_from_this() const
+ { return mWeakPtr; }
+
+ public: // This is public because the alternative fails on some compilers that we need to support.
+ mutable weak_ptr<T> mWeakPtr;
+
+ protected:
+ template <typename U> friend class shared_ptr;
+
+ EA_CONSTEXPR enable_shared_from_this() EA_NOEXCEPT
+ { }
+
+ enable_shared_from_this(const enable_shared_from_this&) EA_NOEXCEPT
+ { }
+
+ enable_shared_from_this& operator=(const enable_shared_from_this&) EA_NOEXCEPT
+ { return *this; }
+
+ ~enable_shared_from_this()
+ { }
+
+ }; // enable_shared_from_this
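+
+	// Illustrative usage sketch (not part of the original header): the Widget class below is
+	// hypothetical; the object must already be owned by a shared_ptr when shared_from_this is called.
+	//
+	//     struct Widget : public eastl::enable_shared_from_this<Widget>
+	//     {
+	//         eastl::shared_ptr<Widget> Self() { return shared_from_this(); }
+	//     };
+	//
+	//     eastl::shared_ptr<Widget> w(new Widget);
+	//     eastl::shared_ptr<Widget> w2 = w->Self();   // Shares ownership with w.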
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
diff --git a/include/EASTL/internal/fill_help.h b/include/EASTL/internal/fill_help.h
new file mode 100644
index 0000000..235a24e
--- /dev/null
+++ b/include/EASTL/internal/fill_help.h
@@ -0,0 +1,484 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_FILL_HELP_H
+#define EASTL_INTERNAL_FILL_HELP_H
+
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_COMPILER_MICROSOFT) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+#include <intrin.h>
+#endif
+
+namespace eastl
+{
+ // fill
+ //
+ // We implement some fill helper functions in order to allow us to optimize it
+ // where possible.
+ //
+ template <bool bIsScalar>
+ struct fill_imp
+ {
+ template <typename ForwardIterator, typename T>
+ static void do_fill(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ // The C++ standard doesn't specify whether we need to create a temporary
+ // or not, but all std STL implementations are written like what we have here.
+ for(; first != last; ++first)
+ *first = value;
+ }
+ };
+
+ template <>
+ struct fill_imp<true>
+ {
+ template <typename ForwardIterator, typename T>
+ static void do_fill(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+ // We create a temp and fill from that because value might alias to the
+ // destination range and so the compiler would be forced into generating
+ // less efficient code.
+ for(const T temp = value; first != last; ++first)
+ {
+ EA_UNUSED(temp);
+ *first = static_cast<value_type>(temp);
+ }
+ }
+ };
+
+ /// fill
+ ///
+ /// fill is like memset in that it assigns a single value repeatedly to a
+ /// destination range. It allows for any type of iterator (not just an array)
+ /// and the source value can be any type, not just a byte.
+ /// Note that the source value (which is a reference) can come from within
+ /// the destination range.
+ ///
+ /// Effects: Assigns value through all the iterators in the range [first, last).
+ ///
+ /// Complexity: Exactly 'last - first' assignments.
+ ///
+ /// Note: The C++ standard doesn't specify anything about the value parameter
+ /// coming from within the first-last range. All std STL implementations act
+ /// as if the standard specifies that value must not come from within this range.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline void fill(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ eastl::fill_imp< is_scalar<T>::value >::do_fill(first, last, value);
+
+ // Possibly better implementation, as it will deal with small PODs as well as scalars:
+ // bEasyCopy is true if the type has a trivial constructor (e.g. is a POD) and if
+ // it is small. Thus any built-in type or any small user-defined struct will qualify.
+ //const bool bEasyCopy = eastl::type_and<eastl::has_trivial_constructor<T>::value,
+ // eastl::integral_constant<bool, (sizeof(T) <= 16)>::value;
+ //eastl::fill_imp<bEasyCopy>::do_fill(first, last, value);
+
+ }
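+
+	// Illustrative sketch (not part of the original header): the same fill call can resolve
+	// to very different code paths; the buffers below are arbitrary examples.
+	//
+	//     char buffer[64];
+	//     eastl::fill(buffer, buffer + 64, 0);   // Byte overload below: effectively memset.
+	//
+	//     eastl::string names[4];
+	//     eastl::fill(names, names + 4, eastl::string("n/a"));   // Generic assignment loop.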
+
+ #if(defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+ #if defined(EA_PROCESSOR_X86_64)
+ template <typename Value>
+ inline void fill(uint64_t* first, uint64_t* last, Value c)
+ {
+ uintptr_t count = (uintptr_t)(last - first);
+ uint64_t value = (uint64_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosq\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ }
+
+
+ template <typename Value>
+ inline void fill(int64_t* first, int64_t* last, Value c)
+ {
+ uintptr_t count = (uintptr_t)(last - first);
+ int64_t value = (int64_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosq\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ }
+ #endif
+
+ template <typename Value>
+ inline void fill(uint32_t* first, uint32_t* last, Value c)
+ {
+ uintptr_t count = (uintptr_t)(last - first);
+ uint32_t value = (uint32_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosl\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ }
+
+
+ template <typename Value>
+ inline void fill(int32_t* first, int32_t* last, Value c)
+ {
+ uintptr_t count = (uintptr_t)(last - first);
+ int32_t value = (int32_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosl\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ }
+
+
+ template <typename Value>
+ inline void fill(uint16_t* first, uint16_t* last, Value c)
+ {
+ uintptr_t count = (uintptr_t)(last - first);
+ uint16_t value = (uint16_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosw\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ }
+
+
+ template <typename Value>
+ inline void fill(int16_t* first, int16_t* last, Value c)
+ {
+ uintptr_t count = (uintptr_t)(last - first);
+ int16_t value = (int16_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosw\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ }
+
+ #elif defined(EA_COMPILER_MICROSOFT) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+ #if defined(EA_PROCESSOR_X86_64)
+ template <typename Value>
+ inline void fill(uint64_t* first, uint64_t* last, Value c)
+ {
+ __stosq(first, (uint64_t)c, (size_t)(last - first));
+ }
+
+ template <typename Value>
+ inline void fill(int64_t* first, int64_t* last, Value c)
+ {
+ __stosq((uint64_t*)first, (uint64_t)c, (size_t)(last - first));
+ }
+ #endif
+
+ template <typename Value>
+ inline void fill(uint32_t* first, uint32_t* last, Value c)
+ {
+ __stosd((unsigned long*)first, (unsigned long)c, (size_t)(last - first));
+ }
+
+ template <typename Value>
+ inline void fill(int32_t* first, int32_t* last, Value c)
+ {
+ __stosd((unsigned long*)first, (unsigned long)c, (size_t)(last - first));
+ }
+
+ template <typename Value>
+ inline void fill(uint16_t* first, uint16_t* last, Value c)
+ {
+ __stosw(first, (uint16_t)c, (size_t)(last - first));
+ }
+
+ template <typename Value>
+ inline void fill(int16_t* first, int16_t* last, Value c)
+ {
+ __stosw((uint16_t*)first, (uint16_t)c, (size_t)(last - first));
+ }
+ #endif
+
+
+	inline void fill(char* first, char* last, const char& c) // It's debatable whether we should use 'char& c' or 'char c' here.
+ {
+ memset(first, (unsigned char)c, (size_t)(last - first));
+ }
+
+ inline void fill(char* first, char* last, const int c) // This is used for cases like 'fill(first, last, 0)'.
+ {
+ memset(first, (unsigned char)c, (size_t)(last - first));
+ }
+
+ inline void fill(unsigned char* first, unsigned char* last, const unsigned char& c)
+ {
+ memset(first, (unsigned char)c, (size_t)(last - first));
+ }
+
+ inline void fill(unsigned char* first, unsigned char* last, const int c)
+ {
+ memset(first, (unsigned char)c, (size_t)(last - first));
+ }
+
+ inline void fill(signed char* first, signed char* last, const signed char& c)
+ {
+ memset(first, (unsigned char)c, (size_t)(last - first));
+ }
+
+ inline void fill(signed char* first, signed char* last, const int c)
+ {
+ memset(first, (unsigned char)c, (size_t)(last - first));
+ }
+
+ #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__ICL) // ICL = Intel compiler
+ inline void fill(bool* first, bool* last, const bool& b)
+ {
+ memset(first, (char)b, (size_t)(last - first));
+ }
+ #endif
+
+
+
+
+ // fill_n
+ //
+ // We implement some fill helper functions in order to allow us to optimize it
+ // where possible.
+ //
+ template <bool bIsScalar>
+ struct fill_n_imp
+ {
+ template <typename OutputIterator, typename Size, typename T>
+ static OutputIterator do_fill(OutputIterator first, Size n, const T& value)
+ {
+ for(; n-- > 0; ++first)
+ *first = value;
+ return first;
+ }
+ };
+
+ template <>
+ struct fill_n_imp<true>
+ {
+ template <typename OutputIterator, typename Size, typename T>
+ static OutputIterator do_fill(OutputIterator first, Size n, const T& value)
+ {
+ typedef typename eastl::iterator_traits<OutputIterator>::value_type value_type;
+
+ // We create a temp and fill from that because value might alias to
+ // the destination range and so the compiler would be forced into
+ // generating less efficient code.
+ for(const T temp = value; n-- > 0; ++first)
+ *first = static_cast<value_type>(temp);
+ return first;
+ }
+ };
+
+ /// fill_n
+ ///
+	/// The fill_n function is very much like memset in that it copies a source value
+ /// n times into a destination range. The source value may come from within
+ /// the destination range.
+ ///
+ /// Effects: Assigns value through all the iterators in the range [first, first + n).
+ ///
+ /// Complexity: Exactly n assignments.
+ ///
+ template <typename OutputIterator, typename Size, typename T>
+ OutputIterator fill_n(OutputIterator first, Size n, const T& value)
+ {
+ return eastl::fill_n_imp<is_scalar<T>::value>::do_fill(first, n, value);
+ }
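+
+	// Illustrative sketch (not part of the original header): fill_n returns the end of the
+	// written range, so successive fills can be chained; the buffer is an arbitrary example.
+	//
+	//     uint32_t buf[16];
+	//     uint32_t* mid = eastl::fill_n(buf, 8, 0xFFFFFFFFu);   // mid == buf + 8
+	//     eastl::fill_n(mid, 8, 0u);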
+
+ template <typename Size>
+ inline char* fill_n(char* first, Size n, const char& c)
+ {
+ return (char*)memset(first, (char)c, (size_t)n) + n;
+ }
+
+ template <typename Size>
+ inline unsigned char* fill_n(unsigned char* first, Size n, const unsigned char& c)
+ {
+ return (unsigned char*)memset(first, (unsigned char)c, (size_t)n) + n;
+ }
+
+ template <typename Size>
+ inline signed char* fill_n(signed char* first, Size n, const signed char& c)
+ {
+ return (signed char*)memset(first, (signed char)c, n) + (size_t)n;
+ }
+
+ #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__ICL) // ICL = Intel compiler
+ template <typename Size>
+ inline bool* fill_n(bool* first, Size n, const bool& b)
+ {
+ return (bool*)memset(first, (char)b, n) + (size_t)n;
+ }
+ #endif
+
+ #if(defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+ #if defined(EA_PROCESSOR_X86_64)
+ template <typename Size, typename Value>
+ inline uint64_t* fill_n(uint64_t* first, Size n, Value c)
+ {
+ uintptr_t count = (uintptr_t)(n);
+ uint64_t value = (uint64_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosq\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ return first; // first is updated by the code above.
+ }
+
+
+ template <typename Size, typename Value>
+ inline int64_t* fill_n(int64_t* first, Size n, Value c)
+ {
+ uintptr_t count = (uintptr_t)(n);
+ int64_t value = (int64_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosq\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ return first; // first is updated by the code above.
+ }
+ #endif
+
+ template <typename Size, typename Value>
+ inline uint32_t* fill_n(uint32_t* first, Size n, Value c)
+ {
+ uintptr_t count = (uintptr_t)(n);
+ uint32_t value = (uint32_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosl\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ return first; // first is updated by the code above.
+ }
+
+
+ template <typename Size, typename Value>
+ inline int32_t* fill_n(int32_t* first, Size n, Value c)
+ {
+ uintptr_t count = (uintptr_t)(n);
+ int32_t value = (int32_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosl\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ return first; // first is updated by the code above.
+ }
+
+
+ template <typename Size, typename Value>
+ inline uint16_t* fill_n(uint16_t* first, Size n, Value c)
+ {
+ uintptr_t count = (uintptr_t)(n);
+ uint16_t value = (uint16_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosw\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ return first; // first is updated by the code above.
+ }
+
+
+ template <typename Size, typename Value>
+ inline int16_t* fill_n(int16_t* first, Size n, Value c)
+ {
+ uintptr_t count = (uintptr_t)(n);
+ int16_t value = (int16_t)(c);
+
+ __asm__ __volatile__ ("cld\n\t"
+ "rep stosw\n\t"
+ : "+c" (count), "+D" (first), "=m" (first)
+ : "a" (value)
+ : "cc" );
+ return first; // first is updated by the code above.
+ }
+
+ #elif defined(EA_COMPILER_MICROSOFT) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64))
+ #if defined(EA_PROCESSOR_X86_64)
+ template <typename Size, typename Value>
+ inline uint64_t* fill_n(uint64_t* first, Size n, Value c)
+ {
+ __stosq(first, (uint64_t)c, (size_t)n);
+ return first + n;
+ }
+
+ template <typename Size, typename Value>
+ inline int64_t* fill_n(int64_t* first, Size n, Value c)
+ {
+ __stosq((uint64_t*)first, (uint64_t)c, (size_t)n);
+ return first + n;
+ }
+ #endif
+
+ template <typename Size, typename Value>
+ inline uint32_t* fill_n(uint32_t* first, Size n, Value c)
+ {
+ __stosd((unsigned long*)first, (unsigned long)c, (size_t)n);
+ return first + n;
+ }
+
+ template <typename Size, typename Value>
+ inline int32_t* fill_n(int32_t* first, Size n, Value c)
+ {
+ __stosd((unsigned long*)first, (unsigned long)c, (size_t)n);
+ return first + n;
+ }
+
+ template <typename Size, typename Value>
+ inline uint16_t* fill_n(uint16_t* first, Size n, Value c)
+ {
+ __stosw(first, (uint16_t)c, (size_t)n);
+ return first + n;
+ }
+
+ template <typename Size, typename Value>
+ inline int16_t* fill_n(int16_t* first, Size n, Value c)
+ {
+ __stosw((uint16_t*)first, (uint16_t)c, (size_t)n);
+ return first + n;
+ }
+ #endif
+
+} // namespace eastl
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/include/EASTL/internal/fixed_pool.h b/include/EASTL/internal/fixed_pool.h
new file mode 100644
index 0000000..5a38004
--- /dev/null
+++ b/include/EASTL/internal/fixed_pool.h
@@ -0,0 +1,1631 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the following
+// aligned_buffer
+// fixed_pool_base
+// fixed_pool
+// fixed_pool_with_overflow
+// fixed_hashtable_allocator
+// fixed_vector_allocator
+// fixed_swap
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_FIXED_POOL_H
+#define EASTL_INTERNAL_FIXED_POOL_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/functional.h>
+#include <EASTL/memory.h>
+#include <EASTL/allocator.h>
+#include <EASTL/type_traits.h>
+
+
+EA_DISABLE_ALL_VC_WARNINGS();
+#include <new>
+EA_RESTORE_ALL_VC_WARNINGS();
+
+// 4275 - non dll-interface class used as base for DLL-interface classkey 'identifier'
+EA_DISABLE_VC_WARNING(4275);
+
+
+namespace eastl
+{
+
+ /// EASTL_FIXED_POOL_DEFAULT_NAME
+ ///
+ /// Defines a default allocator name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_FIXED_POOL_DEFAULT_NAME
+ #define EASTL_FIXED_POOL_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_pool" // Unless the user overrides something, this is "EASTL fixed_pool".
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // aligned_buffer
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// aligned_buffer
+ ///
+ /// This is useful for creating a buffer of the same size and alignment
+ /// of a given struct or class. This is useful for creating memory pools
+ /// that support both size and alignment requirements of stored objects
+ /// but without wasting space in over-allocating.
+ ///
+ /// Note that we implement this via struct specializations, as some
+ /// compilers such as VC++ do not support specification of alignments
+ /// in any way other than via an integral constant.
+ ///
+ /// Example usage:
+ /// struct Widget{ }; // This class has a given size and alignment.
+ ///
+ /// Declare a char buffer of equal size and alignment to Widget.
+ /// aligned_buffer<sizeof(Widget), EASTL_ALIGN_OF(Widget)> mWidgetBuffer;
+ ///
+ /// Declare an array this time.
+ /// aligned_buffer<sizeof(Widget), EASTL_ALIGN_OF(Widget)> mWidgetArray[15];
+ ///
+ typedef char EASTL_MAY_ALIAS aligned_buffer_char;
+
+ template <size_t size, size_t alignment>
+ struct aligned_buffer { aligned_buffer_char buffer[size]; };
+
+ template<size_t size>
+ struct aligned_buffer<size, 2> { EA_PREFIX_ALIGN(2) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 4> { EA_PREFIX_ALIGN(4) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 8> { EA_PREFIX_ALIGN(8) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(8); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 16> { EA_PREFIX_ALIGN(16) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(16); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 32> { EA_PREFIX_ALIGN(32) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(32); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 64> { EA_PREFIX_ALIGN(64) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(64); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 128> { EA_PREFIX_ALIGN(128) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(128); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 256> { EA_PREFIX_ALIGN(256) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(256); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 512> { EA_PREFIX_ALIGN(512) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(512); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 1024> { EA_PREFIX_ALIGN(1024) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(1024); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 2048> { EA_PREFIX_ALIGN(2048) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2048); };
+
+ template<size_t size>
+ struct aligned_buffer<size, 4096> { EA_PREFIX_ALIGN(4096) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4096); };
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_pool_base
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_pool_base
+ ///
+ /// This is a base class for the implementation of fixed-size pools.
+ /// In particular, the fixed_pool and fixed_pool_with_overflow classes
+ /// are based on fixed_pool_base.
+ ///
+ struct fixed_pool_base
+ {
+ public:
+ /// fixed_pool_base
+ ///
+ fixed_pool_base(void* pMemory = NULL)
+ : mpHead((Link*)pMemory)
+ , mpNext((Link*)pMemory)
+ , mpCapacity((Link*)pMemory)
+ , mnNodeSize(0) // This is normally set in the init function.
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ mnCurrentSize = 0;
+ mnPeakSize = 0;
+ #endif
+ }
+
+
+ /// fixed_pool_base
+ ///
+ // Disabled because the default is sufficient. While it normally makes no sense to deep copy
+ // this data, our usage of this class is such that this is OK and wanted.
+ //
+ // fixed_pool_base(const fixed_pool_base& x)
+ // {
+ // }
+
+
+ /// operator=
+ ///
+ fixed_pool_base& operator=(const fixed_pool_base&)
+ {
+ // By design we do nothing. We don't attempt to deep-copy member data.
+ return *this;
+ }
+
+
+ /// init
+ ///
+ /// Initializes a fixed_pool with a given set of parameters.
+ /// You cannot call this function twice else the resulting
+ /// behaviour will be undefined. You can only call this function
+ /// after constructing the fixed_pool with the default constructor.
+ ///
+ EASTL_API void init(void* pMemory, size_t memorySize, size_t nodeSize,
+ size_t alignment, size_t alignmentOffset = 0);
+
+
+ /// peak_size
+ ///
+ /// Returns the maximum number of outstanding allocations there have been
+ /// at any one time. This represents a high water mark for the allocation count.
+ ///
+ size_t peak_size() const
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ return mnPeakSize;
+ #else
+ return 0;
+ #endif
+ }
+
+
+ /// can_allocate
+ ///
+ /// Returns true if there are any free links.
+ ///
+ bool can_allocate() const
+ {
+ return (mpHead != NULL) || (mpNext != mpCapacity);
+ }
+
+ public:
+ /// Link
+ /// Implements a singly-linked list.
+ struct Link
+ {
+ Link* mpNext;
+ };
+
+ Link* mpHead;
+ Link* mpNext;
+ Link* mpCapacity;
+ size_t mnNodeSize;
+
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ uint32_t mnCurrentSize; /// Current number of allocated nodes.
+ uint32_t mnPeakSize; /// Max number of allocated nodes at any one time.
+ #endif
+
+ }; // fixed_pool_base
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_pool
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_pool
+ ///
+ /// Implements a simple fixed pool allocator for use by fixed-size containers.
+ /// This is not a generic eastl allocator which can be plugged into an arbitrary
+	/// eastl container, as it simplifies some functions and arguments for the
+ /// purpose of efficiency.
+ ///
+ class EASTL_API fixed_pool : public fixed_pool_base
+ {
+ public:
+ /// fixed_pool
+ ///
+ /// Default constructor. User usually will want to call init() after
+ /// constructing via this constructor. The pMemory argument is for the
+ /// purposes of temporarily storing a pointer to the buffer to be used.
+ /// Even though init may have a pMemory argument, this arg is useful
+ /// for temporary storage, as per copy construction.
+ ///
+ fixed_pool(void* pMemory = NULL)
+ : fixed_pool_base(pMemory)
+ {
+ }
+
+
+ /// fixed_pool
+ ///
+ /// Constructs a fixed_pool with a given set of parameters.
+ ///
+ fixed_pool(void* pMemory, size_t memorySize, size_t nodeSize,
+ size_t alignment, size_t alignmentOffset = 0)
+ {
+ init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
+ }
+
+
+ /// fixed_pool
+ ///
+ // Disabled because the default is sufficient. While it normally makes no sense to deep copy
+ // this data, our usage of this class is such that this is OK and wanted.
+ //
+ // fixed_pool(const fixed_pool& x)
+ // {
+ // }
+
+
+ /// operator=
+ ///
+ fixed_pool& operator=(const fixed_pool&)
+ {
+ // By design we do nothing. We don't attempt to deep-copy member data.
+ return *this;
+ }
+
+
+ /// allocate
+ ///
+ /// Allocates a new object of the size specified upon class initialization.
+ /// Returns NULL if there is no more memory.
+ ///
+ void* allocate()
+ {
+ Link* pLink = mpHead;
+
+ if(pLink) // If we have space...
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ if(++mnCurrentSize > mnPeakSize)
+ mnPeakSize = mnCurrentSize;
+ #endif
+
+ mpHead = pLink->mpNext;
+ return pLink;
+ }
+ else
+ {
+ // If there's no free node in the free list, just
+ // allocate another from the reserved memory area
+
+ if(mpNext != mpCapacity)
+ {
+ pLink = mpNext;
+
+ mpNext = reinterpret_cast<Link*>(reinterpret_cast<char*>(mpNext) + mnNodeSize);
+
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ if(++mnCurrentSize > mnPeakSize)
+ mnPeakSize = mnCurrentSize;
+ #endif
+
+ return pLink;
+ }
+
+ return NULL;
+ }
+ }
+
+ void* allocate(size_t /*alignment*/, size_t /*offset*/)
+ {
+ return allocate();
+ }
+
+ /// deallocate
+ ///
+ /// Frees the given object which was allocated by allocate().
+ /// If the given node was not allocated by allocate() then the behaviour
+ /// is undefined.
+ ///
+ void deallocate(void* p)
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ --mnCurrentSize;
+ #endif
+
+ ((Link*)p)->mpNext = mpHead;
+ mpHead = ((Link*)p);
+ }
+
+
+ using fixed_pool_base::can_allocate;
+
+
+ const char* get_name() const
+ {
+ return EASTL_FIXED_POOL_DEFAULT_NAME;
+ }
+
+
+ void set_name(const char*)
+ {
+ // Nothing to do. We don't allocate memory.
+ }
+
+ }; // fixed_pool
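+
+	// Illustrative usage sketch (not part of the original header): driving a fixed_pool over a
+	// user-supplied buffer; the Widget type and the node count of 32 are arbitrary assumptions.
+	//
+	//     eastl::aligned_buffer<sizeof(Widget), EASTL_ALIGN_OF(Widget)> buffer[32];
+	//     eastl::fixed_pool pool(buffer, sizeof(buffer), sizeof(Widget), EASTL_ALIGN_OF(Widget));
+	//
+	//     void* p = pool.allocate();   // Returns NULL once all 32 nodes are in use.
+	//     if(p)
+	//         pool.deallocate(p);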
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_pool_with_overflow
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_pool_with_overflow
+ ///
+ template <typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_pool_with_overflow : public fixed_pool_base
+ {
+ public:
+ typedef OverflowAllocator overflow_allocator_type;
+
+
+ fixed_pool_with_overflow(void* pMemory = NULL)
+ : fixed_pool_base(pMemory),
+ mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
+ {
+ // Leave mpPoolBegin, mpPoolEnd uninitialized.
+ }
+
+
+ fixed_pool_with_overflow(void* pMemory, const overflow_allocator_type& allocator)
+ : fixed_pool_base(pMemory),
+ mOverflowAllocator(allocator)
+ {
+ // Leave mpPoolBegin, mpPoolEnd uninitialized.
+ }
+
+
+ fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize,
+ size_t alignment, size_t alignmentOffset = 0)
+ : mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
+ {
+ fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
+
+ mpPoolBegin = pMemory;
+ }
+
+
+ fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize,
+ size_t alignment, size_t alignmentOffset,
+ const overflow_allocator_type& allocator)
+ : mOverflowAllocator(allocator)
+ {
+ fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
+
+ mpPoolBegin = pMemory;
+ }
+
+
+ // Disabled because the default is sufficient. While it normally makes no sense to deep copy
+ // this data, our usage of this class is such that this is OK and wanted.
+ //
+ //fixed_pool_with_overflow(const fixed_pool_with_overflow& x)
+ //{
+ // ...
+ //}
+
+
+ fixed_pool_with_overflow& operator=(const fixed_pool_with_overflow& x)
+ {
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ mOverflowAllocator = x.mOverflowAllocator;
+ #else
+ (void)x;
+ #endif
+
+ return *this;
+ }
+
+
+ void init(void* pMemory, size_t memorySize, size_t nodeSize,
+ size_t alignment, size_t alignmentOffset = 0)
+ {
+ fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
+
+ mpPoolBegin = pMemory;
+ }
+
+
+ void* allocate()
+ {
+ void* p = NULL;
+ Link* pLink = mpHead;
+
+ if(pLink)
+ {
+ // Unlink from chain
+ p = pLink;
+ mpHead = pLink->mpNext;
+ }
+ else
+ {
+ // If there's no free node in the free list, just
+ // allocate another from the reserved memory area
+
+ if(mpNext != mpCapacity)
+ {
+ p = pLink = mpNext;
+ mpNext = reinterpret_cast<Link*>(reinterpret_cast<char*>(mpNext) + mnNodeSize);
+ }
+ else
+ p = mOverflowAllocator.allocate(mnNodeSize);
+ }
+
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ if(p && (++mnCurrentSize > mnPeakSize))
+ mnPeakSize = mnCurrentSize;
+ #endif
+
+ return p;
+ }
+
+
+ void* allocate(size_t alignment, size_t alignmentOffset)
+ {
+ void* p = NULL;
+ Link* pLink = mpHead;
+
+ if (pLink)
+ {
+ // Unlink from chain
+ p = pLink;
+ mpHead = pLink->mpNext;
+ }
+ else
+ {
+ // If there's no free node in the free list, just
+ // allocate another from the reserved memory area
+
+ if (mpNext != mpCapacity)
+ {
+ p = pLink = mpNext;
+ mpNext = reinterpret_cast<Link*>(reinterpret_cast<char*>(mpNext)+mnNodeSize);
+ }
+ else
+ {
+ p = allocate_memory(mOverflowAllocator, mnNodeSize, alignment, alignmentOffset);
+ EASTL_ASSERT_MSG(p != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+ }
+
+ }
+
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ if (p && (++mnCurrentSize > mnPeakSize))
+ mnPeakSize = mnCurrentSize;
+ #endif
+
+ return p;
+ }
+
+ void deallocate(void* p)
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ --mnCurrentSize;
+ #endif
+
+ if((p >= mpPoolBegin) && (p < mpCapacity))
+ {
+ ((Link*)p)->mpNext = mpHead;
+ mpHead = ((Link*)p);
+ }
+ else
+ mOverflowAllocator.deallocate(p, (size_t)mnNodeSize);
+ }
+
+
+ using fixed_pool_base::can_allocate;
+
+
+ const char* get_name() const
+ {
+ return mOverflowAllocator.get_name();
+ }
+
+
+ void set_name(const char* pName)
+ {
+ mOverflowAllocator.set_name(pName);
+ }
+
+
+ const overflow_allocator_type& get_overflow_allocator() const
+ {
+ return mOverflowAllocator;
+ }
+
+
+ overflow_allocator_type& get_overflow_allocator()
+ {
+ return mOverflowAllocator;
+ }
+
+
+ void set_overflow_allocator(const overflow_allocator_type& overflowAllocator)
+ {
+ mOverflowAllocator = overflowAllocator;
+ }
+ public:
+ OverflowAllocator mOverflowAllocator;
+		void* mpPoolBegin; // Ideally we wouldn't need this member variable. The problem is that the information about the pool buffer and object size is stored in the owning container and we can't access it without increasing the amount of code we need and templating more code. It may turn out that simply storing data here is smaller in the end.
+
+ }; // fixed_pool_with_overflow
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_node_allocator
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_node_allocator
+ ///
+ /// Note: This class was previously named fixed_node_pool, but was changed because this name
+ /// was inconsistent with the other allocators here which ended with _allocator.
+ ///
+ /// Implements a fixed_pool with a given node count, alignment, and alignment offset.
+ /// fixed_node_allocator is like fixed_pool except it is templated on the node type instead
+ /// of being a generic allocator. All it does is pass allocations through to
+ /// the fixed_pool base. This functionality is separate from fixed_pool because there
+ /// are other uses for fixed_pool.
+ ///
+	/// We template on kNodeSize instead of node_type because the former allows
+	/// two different node_types of the same size to use the same template implementation.
+ ///
+ /// Template parameters:
+ /// nodeSize The size of the object to allocate.
+ /// nodeCount The number of objects the pool contains.
+ /// nodeAlignment The alignment of the objects to allocate.
+ /// nodeAlignmentOffset The alignment offset of the objects to allocate.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_node_allocator
+ {
+ public:
+ typedef typename type_select<bEnableOverflow, fixed_pool_with_overflow<OverflowAllocator>, fixed_pool>::type pool_type;
+ typedef fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator> this_type;
+ typedef OverflowAllocator overflow_allocator_type;
+
+ enum
+ {
+ kNodeSize = nodeSize,
+ kNodeCount = nodeCount,
+			kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
+ kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
+ kNodeAlignment = nodeAlignment,
+ kNodeAlignmentOffset = nodeAlignmentOffset
+ };
+
+ public:
+ pool_type mPool;
+
+ public:
+ //fixed_node_allocator(const char* pName)
+ //{
+ // mPool.set_name(pName);
+ //}
+
+
+ fixed_node_allocator(void* pNodeBuffer)
+ : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
+ {
+ }
+
+
+ fixed_node_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator)
+ : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator)
+ {
+ }
+
+
+ /// fixed_node_allocator
+ ///
+ /// Note that we are copying x.mpHead to our own fixed_pool. This at first may seem
+ /// broken, as fixed pools cannot take over ownership of other fixed pools' memory.
+ /// However, we declare that this copy ctor can only ever be safely called when
+ /// the user has intentionally pre-seeded the source with the destination pointer.
+ /// This is somewhat playing with fire, but it allows us to get around chicken-and-egg
+ /// problems with containers being their own allocators, without incurring any memory
+ /// costs or extra code costs. There's another reason for this: we very strongly want
+ /// to avoid full copying of instances of fixed_pool around, especially via the stack.
+	/// Larger pools won't even be able to fit on many machines' stacks. So this solution
+ /// is also a mechanism to prevent that situation from existing and being used.
+ /// Perhaps some day we'll find a more elegant yet costless way around this.
+ ///
+ fixed_node_allocator(const this_type& x)
+ : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, x.mPool.mOverflowAllocator)
+ {
+ }
+
+
+ this_type& operator=(const this_type& x)
+ {
+ mPool = x.mPool;
+ return *this;
+ }
+
+
+ void* allocate(size_t n, int /*flags*/ = 0)
+ {
+ (void)n;
+ EASTL_ASSERT(n == kNodeSize);
+ return mPool.allocate();
+ }
+
+
+ void* allocate(size_t n, size_t alignment, size_t offset, int /*flags*/ = 0)
+ {
+ (void)n;
+ EASTL_ASSERT(n == kNodeSize);
+ return mPool.allocate(alignment, offset);
+ }
+
+
+ void deallocate(void* p, size_t)
+ {
+ mPool.deallocate(p);
+ }
+
+
+ /// can_allocate
+ ///
+ /// Returns true if there are any free links.
+ ///
+ bool can_allocate() const
+ {
+ return mPool.can_allocate();
+ }
+
+
+ /// reset
+ ///
+ /// This function unilaterally resets the fixed pool back to a newly initialized
+ /// state. This is useful for using in tandem with container reset functionality.
+ ///
+ void reset(void* pNodeBuffer)
+ {
+ mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
+ }
+
+
+ const char* get_name() const
+ {
+ return mPool.get_name();
+ }
+
+
+ void set_name(const char* pName)
+ {
+ mPool.set_name(pName);
+ }
+
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return mPool.mOverflowAllocator;
+ }
+
+
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
+ {
+ return mPool.mOverflowAllocator;
+ }
+
+
+ void set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mPool.mOverflowAllocator = allocator;
+ }
+
+
+ void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
+ {
+ mPool.mOverflowAllocator = x.mPool.mOverflowAllocator;
+ }
+
+ }; // fixed_node_allocator
+
+
+ // This is a near copy of the code above, with the only difference being
+ // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs,
+ // and the get_overflow_allocator / set_overflow_allocator functions.
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename OverflowAllocator>
+ class fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator>
+ {
+ public:
+ typedef fixed_pool pool_type;
+ typedef fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator> this_type;
+ typedef OverflowAllocator overflow_allocator_type;
+
+ enum
+ {
+ kNodeSize = nodeSize,
+ kNodeCount = nodeCount,
+			kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
+ kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
+ kNodeAlignment = nodeAlignment,
+ kNodeAlignmentOffset = nodeAlignmentOffset
+ };
+
+ public:
+ pool_type mPool;
+
+ public:
+ fixed_node_allocator(void* pNodeBuffer)
+ : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
+ {
+ }
+
+
+ fixed_node_allocator(void* pNodeBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization.
+ : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
+ {
+ }
+
+
+ /// fixed_node_allocator
+ ///
+ /// Note that we are copying x.mpHead to our own fixed_pool. This at first may seem
+ /// broken, as fixed pools cannot take over ownership of other fixed pools' memory.
+ /// However, we declare that this copy ctor can only ever be safely called when
+ /// the user has intentionally pre-seeded the source with the destination pointer.
+ /// This is somewhat playing with fire, but it allows us to get around chicken-and-egg
+ /// problems with containers being their own allocators, without incurring any memory
+ /// costs or extra code costs. There's another reason for this: we very strongly want
+ /// to avoid full copying of instances of fixed_pool around, especially via the stack.
+	/// Larger pools won't even be able to fit on many machines' stacks. So this solution
+ /// is also a mechanism to prevent that situation from existing and being used.
+ /// Perhaps some day we'll find a more elegant yet costless way around this.
+ ///
+ fixed_node_allocator(const this_type& x) // No need to copy the overflow allocator, because bEnableOverflow is false in this specialization.
+ : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
+ {
+ }
+
+
+ this_type& operator=(const this_type& x)
+ {
+ mPool = x.mPool;
+ return *this;
+ }
+
+
+ void* allocate(size_t n, int /*flags*/ = 0)
+ {
+ (void)n;
+ EASTL_ASSERT(n == kNodeSize);
+ return mPool.allocate();
+ }
+
+
+ void* allocate(size_t n, size_t alignment, size_t offset, int /*flags*/ = 0)
+ {
+ (void)n;
+ EASTL_ASSERT(n == kNodeSize);
+ return mPool.allocate(alignment, offset);
+ }
+
+
+ void deallocate(void* p, size_t)
+ {
+ mPool.deallocate(p);
+ }
+
+
+ bool can_allocate() const
+ {
+ return mPool.can_allocate();
+ }
+
+
+ void reset(void* pNodeBuffer)
+ {
+ mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
+ }
+
+
+ const char* get_name() const
+ {
+ return mPool.get_name();
+ }
+
+
+ void set_name(const char* pName)
+ {
+ mPool.set_name(pName);
+ }
+
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
+ {
+ EASTL_ASSERT(false);
+ overflow_allocator_type* pNULL = NULL;
+ return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+ }
+
+
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
+ {
+ EASTL_ASSERT(false);
+ overflow_allocator_type* pNULL = NULL;
+ return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+ }
+
+
+ void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
+ {
+ // We don't have an overflow allocator.
+ EASTL_ASSERT(false);
+ }
+
+
+ void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
+ {
+ // We don't have an overflow allocator.
+ }
+
+ }; // fixed_node_allocator
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool operator==(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+ const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+ {
+ return (&a == &b); // They are only equal if they are the same object.
+ }
+
+
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool operator!=(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+ const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+ {
+ return (&a != &b); // They are only equal if they are the same object.
+ }
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_hashtable_allocator
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_hashtable_allocator
+ ///
+ /// Provides a base class for fixed hashtable allocations.
+ /// To consider: Have this inherit from fixed_node_allocator.
+ ///
+ /// Template parameters:
+ /// bucketCount The fixed number of hashtable buckets to provide.
+ /// nodeSize The size of individual objects.
+ /// nodeCount The number of objects the pool contains.
+ /// nodeAlignment The alignment of the objects to allocate.
+ /// nodeAlignmentOffset The alignment offset of the objects to allocate.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
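+ ///
+ /// Example instantiation (illustrative only; 'HashNode' and the numeric values
+ /// below are hypothetical and not taken from any particular container):
+ ///     typedef eastl::fixed_hashtable_allocator<
+ ///         37,                        // bucketCount
+ ///         sizeof(HashNode),          // nodeSize
+ ///         100,                       // nodeCount
+ ///         EASTL_ALIGN_OF(HashNode),  // nodeAlignment
+ ///         0,                         // nodeAlignmentOffset
+ ///         true,                      // bEnableOverflow
+ ///         EASTLAllocatorType> example_allocator_type;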
+ ///
+ template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_hashtable_allocator
+ {
+ public:
+ typedef typename type_select<bEnableOverflow, fixed_pool_with_overflow<OverflowAllocator>, fixed_pool>::type pool_type;
+ typedef fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator> this_type;
+ typedef OverflowAllocator overflow_allocator_type;
+
+ enum
+ {
+ kBucketCount = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket.
+ kBucketsSize = bucketCount * sizeof(void*),
+ kNodeSize = nodeSize,
+ kNodeCount = nodeCount,
+ kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
+ kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // Don't need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets.
+ kNodeAlignment = nodeAlignment,
+ kNodeAlignmentOffset = nodeAlignmentOffset,
+ kAllocFlagBuckets = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes.
+ };
+
+ protected:
+ pool_type mPool;
+ void* mpBucketBuffer;
+
+ public:
+ // Disabled because it causes compile conflicts.
+ //fixed_hashtable_allocator(const char* pName)
+ //{
+ // mPool.set_name(pName);
+ //}
+
+ fixed_hashtable_allocator(void* pNodeBuffer)
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(NULL)
+ {
+ // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
+ }
+
+
+ fixed_hashtable_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator)
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator),
+ mpBucketBuffer(NULL)
+ {
+ // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
+ }
+
+
+ fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(pBucketBuffer)
+ {
+ }
+
+
+ fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer, const overflow_allocator_type& allocator)
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator),
+ mpBucketBuffer(pBucketBuffer)
+ {
+ }
+
+
+ /// fixed_hashtable_allocator
+ ///
+ /// Note that we are copying x.mpHead and mpBucketBuffer to our own fixed_pool.
+ /// See the discussion above in fixed_node_allocator for important information about this.
+ ///
+ fixed_hashtable_allocator(const this_type& x)
+ : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, x.mPool.mOverflowAllocator),
+ mpBucketBuffer(x.mpBucketBuffer)
+ {
+ }
+
+
+ fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x)
+ {
+ mPool = x.mPool;
+ return *this;
+ }
+
+
+ void* allocate(size_t n, int flags = 0)
+ {
+ // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
+ EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum.
+
+ if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
+ {
+ EASTL_ASSERT(n == kNodeSize); EA_UNUSED(n);
+ return mPool.allocate();
+ }
+
+ // If bucket size no longer fits within local buffer...
+ if ((flags & kAllocFlagBuckets) == kAllocFlagBuckets && (n > kBucketsSize))
+ return get_overflow_allocator().allocate(n);
+
+ EASTL_ASSERT(n <= kBucketsSize);
+ return mpBucketBuffer;
+ }
+
+
+ void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
+ {
+ // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
+ if ((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
+ {
+ EASTL_ASSERT(n == kNodeSize); EA_UNUSED(n);
+ return mPool.allocate(alignment, offset);
+ }
+
+ // If bucket size no longer fits within local buffer...
+ if ((flags & kAllocFlagBuckets) == kAllocFlagBuckets && (n > kBucketsSize))
+ return get_overflow_allocator().allocate(n, alignment, offset);
+
+ EASTL_ASSERT(n <= kBucketsSize);
+ return mpBucketBuffer;
+ }
+
+
+ void deallocate(void* p, size_t)
+ {
+ if(p != mpBucketBuffer) // If we are freeing a node and not buckets...
+ mPool.deallocate(p);
+ }
+
+
+ bool can_allocate() const
+ {
+ return mPool.can_allocate();
+ }
+
+
+ void reset(void* pNodeBuffer)
+ {
+ // No need to modify mpBucketBuffer, as that is constant.
+ mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
+ }
+
+
+ const char* get_name() const
+ {
+ return mPool.get_name();
+ }
+
+
+ void set_name(const char* pName)
+ {
+ mPool.set_name(pName);
+ }
+
+
+ const overflow_allocator_type& get_overflow_allocator() const
+ {
+ return mPool.mOverflowAllocator;
+ }
+
+
+ overflow_allocator_type& get_overflow_allocator()
+ {
+ return mPool.mOverflowAllocator;
+ }
+
+
+ void set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mPool.mOverflowAllocator = allocator;
+ }
+
+
+ void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
+ {
+ mPool.mOverflowAllocator = x.mPool.mOverflowAllocator;
+ }
+
+ }; // fixed_hashtable_allocator
+
+
+ // This is a near copy of the code above, with the only difference being
+ // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs,
+ // and the get_overflow_allocator / set_overflow_allocator functions.
+ template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename OverflowAllocator>
+ class fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator>
+ {
+ public:
+ typedef fixed_pool pool_type;
+ typedef fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator> this_type;
+ typedef OverflowAllocator overflow_allocator_type;
+
+ enum
+ {
+ kBucketCount = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket.
+ kBucketsSize = bucketCount * sizeof(void*),
+ kNodeSize = nodeSize,
+ kNodeCount = nodeCount,
+ kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
+ kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // Don't need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets.
+ kNodeAlignment = nodeAlignment,
+ kNodeAlignmentOffset = nodeAlignmentOffset,
+ kAllocFlagBuckets = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes.
+ };
+
+ protected:
+ pool_type mPool;
+ void* mpBucketBuffer;
+
+ public:
+ // Disabled because it causes compile conflicts.
+ //fixed_hashtable_allocator(const char* pName)
+ //{
+ // mPool.set_name(pName);
+ //}
+
+ fixed_hashtable_allocator(void* pNodeBuffer)
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(NULL)
+ {
+ // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
+ }
+
+ fixed_hashtable_allocator(void* pNodeBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization.
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(NULL)
+ {
+ // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
+ }
+
+
+ fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(pBucketBuffer)
+ {
+ }
+
+
+ fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization.
+ : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(pBucketBuffer)
+ {
+ }
+
+
+ /// fixed_hashtable_allocator
+ ///
+ /// Note that we are copying x.mpHead and mpBucketBuffer to our own fixed_pool.
+ /// See the discussion above in fixed_node_allocator for important information about this.
+ ///
+ fixed_hashtable_allocator(const this_type& x) // No need to copy the overflow allocator, because bEnableOverflow is false in this specialization.
+ : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+ mpBucketBuffer(x.mpBucketBuffer)
+ {
+ }
+
+
+ fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x)
+ {
+ mPool = x.mPool;
+ return *this;
+ }
+
+
+ void* allocate(size_t n, int flags = 0)
+ {
+ // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
+ EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum.
+ if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
+ {
+ EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
+ return mPool.allocate();
+ }
+
+ // Don't allow hashtable buckets to overflow in this case.
+ EASTL_ASSERT(n <= kBucketsSize);
+ return mpBucketBuffer;
+ }
+
+
+ void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
+ {
+ // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
+ if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
+ {
+ EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
+ return mPool.allocate(alignment, offset);
+ }
+
+ // Don't allow hashtable buckets to overflow in this case.
+ EASTL_ASSERT(n <= kBucketsSize);
+ return mpBucketBuffer;
+ }
+
+
+ void deallocate(void* p, size_t)
+ {
+ if(p != mpBucketBuffer) // If we are freeing a node and not buckets...
+ mPool.deallocate(p);
+ }
+
+
+ bool can_allocate() const
+ {
+ return mPool.can_allocate();
+ }
+
+
+ void reset(void* pNodeBuffer)
+ {
+ // No need to modify mpBucketBuffer, as that is constant.
+ mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
+ }
+
+
+ const char* get_name() const
+ {
+ return mPool.get_name();
+ }
+
+
+ void set_name(const char* pName)
+ {
+ mPool.set_name(pName);
+ }
+
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
+ {
+ EASTL_ASSERT(false);
+ overflow_allocator_type* pNULL = NULL;
+ return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+ }
+
+
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
+ {
+ EASTL_ASSERT(false);
+ overflow_allocator_type* pNULL = NULL;
+ return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+ }
+
+ void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
+ {
+ // We don't have an overflow allocator.
+ EASTL_ASSERT(false);
+ }
+
+ void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
+ {
+ // We don't have an overflow allocator.
+ }
+
+ }; // fixed_hashtable_allocator
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool operator==(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+ const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+ {
+ return (&a == &b); // They are only equal if they are the same object.
+ }
+
+
+ template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool operator!=(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+ const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+ {
+ return (&a != &b); // They are only equal if they are the same object.
+ }
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_vector_allocator
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_vector_allocator
+ ///
+ /// Template parameters:
+ /// nodeSize The size of individual objects.
+ /// nodeCount The number of objects the pool contains.
+ /// nodeAlignment The alignment of the objects to allocate.
+ /// nodeAlignmentOffset The alignment offset of the objects to allocate.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
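+ ///
+ /// Example instantiation (illustrative only; the values below are hypothetical):
+ ///     typedef eastl::fixed_vector_allocator<
+ ///         sizeof(int), 32, EASTL_ALIGN_OF(int), 0,
+ ///         true, EASTLAllocatorType> example_allocator_type;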
+ ///
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator = EASTLAllocatorType>
+ class fixed_vector_allocator
+ {
+ public:
+ typedef fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator> this_type;
+ typedef OverflowAllocator overflow_allocator_type;
+
+ enum
+ {
+ kNodeSize = nodeSize,
+ kNodeCount = nodeCount,
+ kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
+ kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
+ kNodeAlignment = nodeAlignment,
+ kNodeAlignmentOffset = nodeAlignmentOffset
+ };
+
+ public:
+ overflow_allocator_type mOverflowAllocator;
+ void* mpPoolBegin; // To consider: Find some way to make this data unnecessary, without increasing template proliferation.
+
+ public:
+ // Disabled because it causes compile conflicts.
+ //fixed_vector_allocator(const char* pName = NULL)
+ //{
+ // mOverflowAllocator.set_name(pName);
+ //}
+
+ fixed_vector_allocator(void* pNodeBuffer = nullptr)
+ : mpPoolBegin(pNodeBuffer)
+ {
+ }
+
+ fixed_vector_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator)
+ : mOverflowAllocator(allocator), mpPoolBegin(pNodeBuffer)
+ {
+ }
+
+ // Disabled because the default is sufficient.
+ //fixed_vector_allocator(const fixed_vector_allocator& x)
+ //{
+ // mpPoolBegin = x.mpPoolBegin;
+ // mOverflowAllocator = x.mOverflowAllocator;
+ //}
+
+ fixed_vector_allocator& operator=(const fixed_vector_allocator& x)
+ {
+ // We leave our mpPoolBegin variable alone.
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ mOverflowAllocator = x.mOverflowAllocator;
+ #else
+ (void)x;
+ #endif
+
+ return *this;
+ }
+
+ void* allocate(size_t n, int flags = 0)
+ {
+ return mOverflowAllocator.allocate(n, flags);
+ }
+
+ void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
+ {
+ return mOverflowAllocator.allocate(n, alignment, offset, flags);
+ }
+
+ void deallocate(void* p, size_t n)
+ {
+ if(p != mpPoolBegin)
+ mOverflowAllocator.deallocate(p, n); // Can't do this to our own allocation.
+ }
+
+ const char* get_name() const
+ {
+ return mOverflowAllocator.get_name();
+ }
+
+ void set_name(const char* pName)
+ {
+ mOverflowAllocator.set_name(pName);
+ }
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
+ {
+ return mOverflowAllocator;
+ }
+
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
+ {
+ return mOverflowAllocator;
+ }
+
+ void set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mOverflowAllocator = allocator;
+ }
+
+ void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
+ {
+ mOverflowAllocator = x.mOverflowAllocator;
+ }
+
+ }; // fixed_vector_allocator
+
+
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename OverflowAllocator>
+ class fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator>
+ {
+ public:
+ typedef fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator> this_type;
+ typedef OverflowAllocator overflow_allocator_type;
+
+ enum
+ {
+ kNodeSize = nodeSize,
+ kNodeCount = nodeCount,
+ kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
+ kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
+ kNodeAlignment = nodeAlignment,
+ kNodeAlignmentOffset = nodeAlignmentOffset
+ };
+
+ // Disabled because it causes compile conflicts.
+ //fixed_vector_allocator(const char* = NULL) // This char* parameter is present so that this class can be like the other version.
+ //{
+ //}
+
+ fixed_vector_allocator()
+ {
+ }
+
+ fixed_vector_allocator(void* /*pNodeBuffer*/)
+ {
+ }
+
+ fixed_vector_allocator(void* /*pNodeBuffer*/, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization.
+ {
+ }
+
+ /// fixed_vector_allocator
+ ///
+ // Disabled because there is nothing to do. No member data. And the default for this is sufficient.
+ // fixed_vector_allocator(const fixed_vector_allocator&)
+ // {
+ // }
+
+ // Disabled because there is nothing to do. No member data.
+ //fixed_vector_allocator& operator=(const fixed_vector_allocator& x)
+ //{
+ // return *this;
+ //}
+
+ void* allocate(size_t /*n*/, int /*flags*/ = 0)
+ {
+ EASTL_ASSERT(false); // A fixed_vector should never need to allocate; if this is reached, the user has exhausted its fixed space.
+ return NULL;
+ }
+
+ void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
+ {
+ EASTL_ASSERT(false);
+ return NULL;
+ }
+
+ void deallocate(void* /*p*/, size_t /*n*/)
+ {
+ }
+
+ const char* get_name() const
+ {
+ return EASTL_FIXED_POOL_DEFAULT_NAME;
+ }
+
+ void set_name(const char* /*pName*/)
+ {
+ }
+
+ const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
+ {
+ EASTL_ASSERT(false);
+ overflow_allocator_type* pNULL = NULL;
+ return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+ }
+
+ overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
+ {
+ EASTL_ASSERT(false);
+ overflow_allocator_type* pNULL = NULL;
+ return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
+ }
+
+ void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
+ {
+ // We don't have an overflow allocator.
+ EASTL_ASSERT(false);
+ }
+
+ void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
+ {
+ // We don't have an overflow allocator.
+ }
+
+ }; // fixed_vector_allocator
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool operator==(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+ const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+ {
+ return (&a == &b); // They are only equal if they are the same object.
+ }
+
+
+ template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
+ inline bool operator!=(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
+ const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
+ {
+ return (&a != &b); // They are only equal if they are the same object.
+ }
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_swap
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_swap
+ ///
+ /// This function implements a swap suitable for fixed containers.
+ /// This is an issue because the size of fixed containers can be very
+ /// large, due to their having the container buffer within themselves.
+ /// Note that we are referring to sizeof(container) and not the total
+ /// sum of memory allocated by the container from the heap.
+ ///
+ ///
+ /// This implementation chooses at compile time whether the temporary
+ /// is allocated on the stack or on the heap, because some compilers
+ /// will allocate the (large) stack frame regardless of which code
+ /// path is taken.
+ template <typename Container, bool UseHeapTemporary>
+ class fixed_swap_impl
+ {
+ public:
+ static void swap(Container& a, Container& b);
+ };
+
+
+ template <typename Container>
+ class fixed_swap_impl<Container, false>
+ {
+ public:
+ static void swap(Container& a, Container& b)
+ {
+ Container temp(EASTL_MOVE(a)); // Can't use global swap because that could
+ a = EASTL_MOVE(b); // itself call this swap function in turn.
+ b = EASTL_MOVE(temp);
+ }
+ };
+
+
+ template <typename Container>
+ class fixed_swap_impl<Container, true>
+ {
+ public:
+ static void swap(Container& a, Container& b)
+ {
+ EASTLAllocatorType allocator(*EASTLAllocatorDefault(), EASTL_TEMP_DEFAULT_NAME);
+ void* const pMemory = allocator.allocate(sizeof(a));
+
+ if(pMemory)
+ {
+ Container* pTemp = ::new(pMemory) Container(EASTL_MOVE(a));
+ a = EASTL_MOVE(b);
+ b = EASTL_MOVE(*pTemp);
+
+ pTemp->~Container();
+ allocator.deallocate(pMemory, sizeof(a));
+ }
+ }
+ };
+
+
+ template<typename Container>
+ void fixed_swap(Container& a, Container& b)
+ {
+ return fixed_swap_impl<Container, sizeof(Container) >= EASTL_MAX_STACK_USAGE>::swap(a, b);
+ }
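+
+ // Usage sketch (illustrative only; fixed_list is used here merely as an example of a
+ // fixed-size container, and the element count is arbitrary):
+ //     eastl::fixed_list<int, 64> a, b;
+ //     // ... populate a and b ...
+ //     eastl::fixed_swap(a, b); // Uses a heap-allocated temporary when sizeof(container) >= EASTL_MAX_STACK_USAGE.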
+
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/include/EASTL/internal/function.h b/include/EASTL/internal/function.h
new file mode 100644
index 0000000..6e857f0
--- /dev/null
+++ b/include/EASTL/internal/function.h
@@ -0,0 +1,161 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FUNCTION_H
+#define EASTL_FUNCTION_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/function_detail.h>
+
+namespace eastl
+{
+
+ /// EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE
+ ///
+ /// Defines the size of the SSO buffer which is used to hold the specified capture state of the callable.
+ ///
+ #ifndef EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE
+ #define EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE (2 * sizeof(void*))
+ #endif
+
+ static_assert(EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE >= sizeof(void*), "functor storage must be able to hold at least a pointer!");
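+
+ // Illustrative sketch of what the SSO buffer implies (assumes 8-byte pointers, i.e. a
+ // 16-byte default buffer; whether a given callable is stored inline is platform dependent):
+ //     eastl::function<int(int)> f1 = [](int x) { return x + 1; };                   // captureless: stored inline
+ //     void* p0 = nullptr; void* p1 = nullptr;
+ //     eastl::function<int(int)> f2 = [p0, p1](int x) { return p0 == p1 ? x : -x; }; // two pointers: typically still inline
+ //     char big[64] = {};
+ //     eastl::function<int(int)> f3 = [big](int x) { return x + big[0]; };           // larger than the buffer: heap allocated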
+
+ template <typename>
+ class function;
+
+ template <typename R, typename... Args>
+ class function<R(Args...)> : public internal::function_detail<EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE, R(Args...)>
+ {
+ private:
+ using Base = internal::function_detail<EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE, R(Args...)>;
+ public:
+ using typename Base::result_type;
+
+ function() EA_NOEXCEPT = default;
+ function(std::nullptr_t p) EA_NOEXCEPT
+ : Base(p)
+ {
+ }
+
+ function(const function& other)
+ : Base(other)
+ {
+ }
+
+ function(function&& other)
+ : Base(eastl::move(other))
+ {
+ }
+
+ template <typename Functor, typename = EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(Functor, R, Args..., Base, function)>
+ function(Functor functor)
+ : Base(eastl::move(functor))
+ {
+ }
+
+ ~function() EA_NOEXCEPT = default;
+
+ function& operator=(const function& other)
+ {
+ Base::operator=(other);
+ return *this;
+ }
+
+ function& operator=(function&& other)
+ {
+ Base::operator=(eastl::move(other));
+ return *this;
+ }
+
+ function& operator=(std::nullptr_t p) EA_NOEXCEPT
+ {
+ Base::operator=(p);
+ return *this;
+ }
+
+ template <typename Functor, typename = EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(Functor, R, Args..., Base, function)>
+ function& operator=(Functor&& functor)
+ {
+ Base::operator=(eastl::forward<Functor>(functor));
+ return *this;
+ }
+
+ template <typename Functor>
+ function& operator=(eastl::reference_wrapper<Functor> f) EA_NOEXCEPT
+ {
+ Base::operator=(f);
+ return *this;
+ }
+
+ void swap(function& other) EA_NOEXCEPT
+ {
+ Base::swap(other);
+ }
+
+ explicit operator bool() const EA_NOEXCEPT
+ {
+ return Base::operator bool();
+ }
+
+ R operator ()(Args... args) const
+ {
+ return Base::operator ()(eastl::forward<Args>(args)...);
+ }
+
+ #if EASTL_RTTI_ENABLED
+ const std::type_info& target_type() const EA_NOEXCEPT
+ {
+ return Base::target_type();
+ }
+
+ template <typename Functor>
+ Functor* target() EA_NOEXCEPT
+ {
+ return Base::target();
+ }
+
+ template <typename Functor>
+ const Functor* target() const EA_NOEXCEPT
+ {
+ return Base::target();
+ }
+ #endif // EASTL_RTTI_ENABLED
+ };
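+
+ // Brief usage sketch (illustrative only):
+ //     eastl::function<int(int, int)> op = [](int a, int b) { return a + b; };
+ //     const int sum = op ? op(2, 3) : 0; // sum == 5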
+
+ template <typename R, typename... Args>
+ bool operator==(const function<R(Args...)>& f, std::nullptr_t) EA_NOEXCEPT
+ {
+ return !f;
+ }
+
+ template <typename R, typename... Args>
+ bool operator==(std::nullptr_t, const function<R(Args...)>& f) EA_NOEXCEPT
+ {
+ return !f;
+ }
+
+ template <typename R, typename... Args>
+ bool operator!=(const function<R(Args...)>& f, std::nullptr_t) EA_NOEXCEPT
+ {
+ return !!f;
+ }
+
+ template <typename R, typename... Args>
+ bool operator!=(std::nullptr_t, const function<R(Args...)>& f) EA_NOEXCEPT
+ {
+ return !!f;
+ }
+
+ template <typename R, typename... Args>
+ void swap(function<R(Args...)>& lhs, function<R(Args...)>& rhs)
+ {
+ lhs.swap(rhs);
+ }
+
+} // namespace eastl
+
+#endif // EASTL_FUNCTION_H
diff --git a/include/EASTL/internal/function_detail.h b/include/EASTL/internal/function_detail.h
new file mode 100644
index 0000000..dc18b63
--- /dev/null
+++ b/include/EASTL/internal/function_detail.h
@@ -0,0 +1,673 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FUNCTION_DETAIL_H
+#define EASTL_FUNCTION_DETAIL_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EABase/eabase.h>
+#include <EABase/nullptr.h>
+#include <EABase/config/eacompilertraits.h>
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/functional_base.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/internal/function_help.h>
+
+#include <EASTL/type_traits.h>
+#include <EASTL/utility.h>
+#include <EASTL/allocator.h>
+
+#if EASTL_RTTI_ENABLED
+ #include <typeinfo>
+#endif
+
+#if EASTL_EXCEPTIONS_ENABLED
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <new>
+ #include <exception>
+ EA_RESTORE_ALL_VC_WARNINGS()
+#endif
+
+namespace eastl
+{
+ #if EASTL_EXCEPTIONS_ENABLED
+ class bad_function_call : public std::exception
+ {
+ public:
+ bad_function_call() EA_NOEXCEPT = default;
+
+ const char* what() const EA_NOEXCEPT EA_OVERRIDE
+ {
+ return "bad function_detail call";
+ }
+ };
+ #endif
+
+ namespace internal
+ {
+ class unused_class {};
+
+ union functor_storage_alignment
+ {
+ void (*unused_func_ptr)(void);
+ void (unused_class::*unused_func_mem_ptr)(void);
+ void* unused_ptr;
+ };
+
+ template <int SIZE_IN_BYTES>
+ struct functor_storage
+ {
+ static_assert(SIZE_IN_BYTES >= 0, "local buffer storage cannot have a negative size!");
+ template <typename Ret>
+ Ret& GetStorageTypeRef() const
+ {
+ return *reinterpret_cast<Ret*>(const_cast<char*>(&storage[0]));
+ }
+
+ union
+ {
+ functor_storage_alignment align;
+ char storage[SIZE_IN_BYTES];
+ };
+ };
+
+ template <>
+ struct functor_storage<0>
+ {
+ template <typename Ret>
+ Ret& GetStorageTypeRef() const
+ {
+ return *reinterpret_cast<Ret*>(const_cast<char*>(&storage[0]));
+ }
+
+ union
+ {
+ functor_storage_alignment align;
+ char storage[sizeof(functor_storage_alignment)];
+ };
+ };
+
+ template <typename Functor, int SIZE_IN_BYTES>
+ struct is_functor_inplace_allocatable
+ {
+ static constexpr bool value =
+ sizeof(Functor) <= sizeof(functor_storage<SIZE_IN_BYTES>) &&
+ (eastl::alignment_of_v<functor_storage<SIZE_IN_BYTES>> % eastl::alignment_of_v<Functor>) == 0;
+ };
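+
+ // Illustrative sketch (assumes a 16-byte local buffer and 8-byte, 8-byte-aligned function
+ // pointers, which holds on typical 64-bit targets but is not guaranteed everywhere):
+ //     static_assert(is_functor_inplace_allocatable<int (*)(int), 16>::value,
+ //                   "a plain function pointer fits in the local buffer");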
+
+
+ /// function_base_detail
+ ///
+ template <int SIZE_IN_BYTES>
+ class function_base_detail
+ {
+ public:
+ using FunctorStorageType = functor_storage<SIZE_IN_BYTES>;
+ FunctorStorageType mStorage;
+
+ enum ManagerOperations : int
+ {
+ MGROPS_DESTRUCT_FUNCTOR = 0,
+ MGROPS_COPY_FUNCTOR = 1,
+ MGROPS_MOVE_FUNCTOR = 2,
+ #if EASTL_RTTI_ENABLED
+ MGROPS_GET_TYPE_INFO = 3,
+ MGROPS_GET_FUNC_PTR = 4,
+ #endif
+ };
+
+ // Functor can be allocated inplace
+ template <typename Functor, typename = void>
+ class function_manager_base
+ {
+ public:
+
+ static Functor* GetFunctorPtr(const FunctorStorageType& storage) EA_NOEXCEPT
+ {
+ return &(storage.template GetStorageTypeRef<Functor>());
+ }
+
+ template <typename T>
+ static void CreateFunctor(FunctorStorageType& storage, T&& functor)
+ {
+ ::new (GetFunctorPtr(storage)) Functor(eastl::forward<T>(functor));
+ }
+
+ static void DestructFunctor(FunctorStorageType& storage)
+ {
+ GetFunctorPtr(storage)->~Functor();
+ }
+
+ static void CopyFunctor(FunctorStorageType& to, const FunctorStorageType& from)
+ {
+ ::new (GetFunctorPtr(to)) Functor(*GetFunctorPtr(from));
+ }
+
+ static void MoveFunctor(FunctorStorageType& to, FunctorStorageType& from) EA_NOEXCEPT
+ {
+ ::new (GetFunctorPtr(to)) Functor(eastl::move(*GetFunctorPtr(from)));
+ }
+
+ static void* Manager(void* to, void* from, typename function_base_detail::ManagerOperations ops) EA_NOEXCEPT
+ {
+ switch (ops)
+ {
+ case MGROPS_DESTRUCT_FUNCTOR:
+ {
+ DestructFunctor(*static_cast<FunctorStorageType*>(to));
+ }
+ break;
+ case MGROPS_COPY_FUNCTOR:
+ {
+ CopyFunctor(*static_cast<FunctorStorageType*>(to),
+ *static_cast<const FunctorStorageType*>(from));
+ }
+ break;
+ case MGROPS_MOVE_FUNCTOR:
+ {
+ MoveFunctor(*static_cast<FunctorStorageType*>(to), *static_cast<FunctorStorageType*>(from));
+ DestructFunctor(*static_cast<FunctorStorageType*>(from));
+ }
+ break;
+ default:
+ break;
+ }
+ return nullptr;
+ }
+ };
+
+ // Functor is allocated on the heap
+ template <typename Functor>
+ class function_manager_base<Functor, typename eastl::enable_if<!is_functor_inplace_allocatable<Functor, SIZE_IN_BYTES>::value>::type>
+ {
+ public:
+ static Functor* GetFunctorPtr(const FunctorStorageType& storage) EA_NOEXCEPT
+ {
+ return storage.template GetStorageTypeRef<Functor*>();
+ }
+
+ static Functor*& GetFunctorPtrRef(const FunctorStorageType& storage) EA_NOEXCEPT
+ {
+ return storage.template GetStorageTypeRef<Functor*>();
+ }
+
+ template <typename T>
+ static void CreateFunctor(FunctorStorageType& storage, T&& functor)
+ {
+ auto& allocator = *EASTLAllocatorDefault();
+ Functor* func = static_cast<Functor*>(allocator.allocate(sizeof(Functor), alignof(Functor), 0));
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ if (!func)
+ {
+ throw std::bad_alloc();
+ }
+ #else
+ EASTL_ASSERT_MSG(func != nullptr, "Allocation failed!");
+ #endif
+
+ ::new (static_cast<void*>(func)) Functor(eastl::forward<T>(functor));
+ GetFunctorPtrRef(storage) = func;
+ }
+
+ static void DestructFunctor(FunctorStorageType& storage)
+ {
+ Functor* func = GetFunctorPtr(storage);
+ if (func)
+ {
+ auto& allocator = *EASTLAllocatorDefault();
+ func->~Functor();
+ allocator.deallocate(static_cast<void*>(func), sizeof(Functor));
+ }
+ }
+
+ static void CopyFunctor(FunctorStorageType& to, const FunctorStorageType& from)
+ {
+ auto& allocator = *EASTLAllocatorDefault();
+ Functor* func = static_cast<Functor*>(allocator.allocate(sizeof(Functor), alignof(Functor), 0));
+ #if EASTL_EXCEPTIONS_ENABLED
+ if (!func)
+ {
+ throw std::bad_alloc();
+ }
+ #else
+ EASTL_ASSERT_MSG(func != nullptr, "Allocation failed!");
+ #endif
+ ::new (static_cast<void*>(func)) Functor(*GetFunctorPtr(from));
+ GetFunctorPtrRef(to) = func;
+ }
+
+ static void MoveFunctor(FunctorStorageType& to, FunctorStorageType& from) EA_NOEXCEPT
+ {
+ Functor* func = GetFunctorPtr(from);
+ GetFunctorPtrRef(to) = func;
+ GetFunctorPtrRef(from) = nullptr;
+ }
+
+ static void* Manager(void* to, void* from, typename function_base_detail::ManagerOperations ops) EA_NOEXCEPT
+ {
+ switch (ops)
+ {
+ case MGROPS_DESTRUCT_FUNCTOR:
+ {
+ DestructFunctor(*static_cast<FunctorStorageType*>(to));
+ }
+ break;
+ case MGROPS_COPY_FUNCTOR:
+ {
+ CopyFunctor(*static_cast<FunctorStorageType*>(to),
+ *static_cast<const FunctorStorageType*>(from));
+ }
+ break;
+ case MGROPS_MOVE_FUNCTOR:
+ {
+ MoveFunctor(*static_cast<FunctorStorageType*>(to), *static_cast<FunctorStorageType*>(from));
+ // Moved ptr, no need to destruct ourselves
+ }
+ break;
+ default:
+ break;
+ }
+ return nullptr;
+ }
+ };
+
+ template <typename Functor, typename R, typename... Args>
+ class function_manager final : public function_manager_base<Functor>
+ {
+ public:
+ using Base = function_manager_base<Functor>;
+
+ #if EASTL_RTTI_ENABLED
+ static void* GetTypeInfo() EA_NOEXCEPT
+ {
+ return reinterpret_cast<void*>(const_cast<std::type_info*>(&typeid(Functor)));
+ }
+
+ static void* Manager(void* to, void* from, typename function_base_detail::ManagerOperations ops) EA_NOEXCEPT
+ {
+ switch (ops)
+ {
+ case MGROPS_GET_TYPE_INFO:
+ {
+ return GetTypeInfo();
+ }
+ break;
+ case MGROPS_GET_FUNC_PTR:
+ {
+ return static_cast<void*>(Base::GetFunctorPtr(*static_cast<FunctorStorageType*>(to)));
+ }
+ break;
+ default:
+ {
+ return Base::Manager(to, from, ops);
+ }
+ break;
+ }
+ }
+ #endif // EASTL_RTTI_ENABLED
+
+ /**
+ * NOTE:
+ *
+ * The order of arguments here is vital to the call optimization. Let's dig into why and look at some asm.
+ * We have two invoker signatures to consider:
+ * R Invoker(const FunctorStorageType& functor, Args... args)
+ * R Invoker(Args... args, const FunctorStorageType& functor)
+ *
+ * Assume we are using the Windows x64 Calling Convention where the first 4 arguments are passed into
+ * RCX, RDX, R8, R9. This optimization works for any Calling Convention, we are just using Windows x64 for
+ * this example.
+ *
+ * Given the following member function: void TestMemberFunc(int a, int b)
+ * RCX == this
+ * RDX == a
+ * R8 == b
+ *
+ * All three arguments to the function, including the hidden this pointer (which in C++ is always the first argument),
+ * are passed in the first three registers.
+ * The function call chain for eastl::function<>() is as follows:
+ * operator ()(this, Args... args) -> Invoker(Args... args, this->mStorage) -> StoredFunction(Args... arg)
+ *
+ * Let's look at what is happening at the asm level with the different Invoker function signatures and why.
+ *
+ * You will notice that operator ()() and Invoker() have the arguments reversed. operator ()() just calls directly
+ * into Invoker(); it is a tail call, so we force-inline the call operator to ensure we call straight into Invoker().
+ * Most compilers inline it by default anyway, but there have been instances where one doesn't, even though the
+ * resulting asm ends up being cheaper.
+ * call -> call -> call versus call -> call
+ *
+ * eastl::function<int(int, int)> = FunctionPointer
+ *
+ * Assume we have the above eastl::function object that holds a pointer to a function as the internal callable.
+ *
+ * Invoker(this->mStorage, Args... args) is called with the following arguments in registers:
+ * RCX = this | RDX = a | R8 = b
+ *
+ * Inside Invoker() we use RCX to deference into the eastl::function object and get the function pointer to call.
+ * This function to call has signature Func(int, int) and thus requires its arguments in registers RCX and RDX.
+ * The compiler must shift all the arguments towards the left. The full asm looks something as follows.
+ *
+ * Calling Invoker: Inside Invoker:
+ *
+ * mov rcx, this mov rax, [rcx]
+ * mov rdx, a mov rcx, rdx
+ * mov r8, b mov rdx, r8
+ * call [rcx + offset to Invoker] jmp [rax]
+ *
+ * Notice how the compiler shifts all the arguments before calling the callable and also we only use the this pointer
+ * to access the internal storage inside the eastl::function object.
+ *
+ * Invoker(Args... args, this->mStorage) is called with the following arguments in registers:
+ * RCX = a | RDX = b | R8 = this
+ *
+ * You can see we no longer have to shift the arguments down when going to call the internal stored callable.
+ *
+ * Calling Invoker: Inside Invoker:
+ *
+ * mov rcx, a mov rax, [r8]
+ * mov rdx, b jmp [rax]
+ * mov r8, this
+ * call [r8 + offset to Invoker]
+ *
+ * The generated asm does a straight tail jmp to the loaded function pointer. The arguments are already in the correct
+ * registers.
+ *
+ * For functors or lambdas with no captures, this gives us another free register to use for passing arguments: since the
+ * this pointer is at the end, it can be passed on the stack if we run out of registers. Because the callable has no captures,
+ * inside the Invoker() we never need to touch this, so we can just call operator ()() or let the compiler inline it.
+ *
+ * For a callable with captures there is no perf hit since the callable in the common case is inlined and the pointer to the callable
+ * buffer is passed in a register which the compiler can use to access the captures.
+ *
+ * Consider an eastl::function<void(const T&, int, int)> that holds a pointer to a member function. The this pointer is implicitly
+ * the first argument in the argument list, const T&, and the member function pointer will be called on that object.
+ * This prevents any argument shifting, since the this for the member function pointer is already in RCX.
+ *
+ * This is why having this at the end of the argument list is important for generating efficient Invoker() thunks.
+ */
+ static R Invoker(Args... args, const FunctorStorageType& functor)
+ {
+ return eastl::invoke(*Base::GetFunctorPtr(functor), eastl::forward<Args>(args)...);
+ }
+ };
+
+ function_base_detail() EA_NOEXCEPT = default;
+ ~function_base_detail() EA_NOEXCEPT = default;
+ };
+
+ #define EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(FUNCTOR, RET, ARGS, BASE, MYSELF) \
+ typename eastl::enable_if_t<eastl::is_invocable_r_v<RET, FUNCTOR, ARGS> && \
+ !eastl::is_base_of_v<BASE, eastl::decay_t<FUNCTOR>> && \
+ !eastl::is_same_v<eastl::decay_t<FUNCTOR>, MYSELF>>
+
+ #define EASTL_INTERNAL_FUNCTION_DETAIL_VALID_FUNCTION_ARGS(FUNCTOR, RET, ARGS, MYSELF) \
+ EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(FUNCTOR, RET, ARGS, MYSELF, MYSELF)
+
+
+ /// function_detail
+ ///
+ template <int, typename>
+ class function_detail;
+
+ template <int SIZE_IN_BYTES, typename R, typename... Args>
+ class function_detail<SIZE_IN_BYTES, R(Args...)> : public function_base_detail<SIZE_IN_BYTES>
+ {
+ public:
+ using result_type = R;
+
+ protected:
+ using Base = function_base_detail<SIZE_IN_BYTES>;
+ using FunctorStorageType = typename function_base_detail<SIZE_IN_BYTES>::FunctorStorageType;
+ using Base::mStorage;
+
+ public:
+ function_detail() EA_NOEXCEPT = default;
+ function_detail(std::nullptr_t) EA_NOEXCEPT {}
+
+ function_detail(const function_detail& other)
+ {
+ if (this != &other)
+ {
+ Copy(other);
+ }
+ }
+
+ function_detail(function_detail&& other)
+ {
+ if (this != &other)
+ {
+ Move(eastl::move(other));
+ }
+ }
+
+ template <typename Functor, typename = EASTL_INTERNAL_FUNCTION_DETAIL_VALID_FUNCTION_ARGS(Functor, R, Args..., function_detail)>
+ function_detail(Functor functor)
+ {
+ CreateForwardFunctor(eastl::move(functor));
+ }
+
+ ~function_detail() EA_NOEXCEPT
+ {
+ Destroy();
+ }
+
+ function_detail& operator=(const function_detail& other)
+ {
+ if (this != &other)
+ {
+ Destroy();
+ Copy(other);
+ }
+
+ return *this;
+ }
+
+ function_detail& operator=(function_detail&& other)
+ {
+ if(this != &other)
+ {
+ Destroy();
+ Move(eastl::move(other));
+ }
+
+ return *this;
+ }
+
+ function_detail& operator=(std::nullptr_t) EA_NOEXCEPT
+ {
+ Destroy();
+ mMgrFuncPtr = nullptr;
+ mInvokeFuncPtr = &DefaultInvoker;
+
+ return *this;
+ }
+
+ template <typename Functor, typename = EASTL_INTERNAL_FUNCTION_DETAIL_VALID_FUNCTION_ARGS(Functor, R, Args..., function_detail)>
+ function_detail& operator=(Functor&& functor)
+ {
+ Destroy();
+ CreateForwardFunctor(eastl::forward<Functor>(functor));
+ return *this;
+ }
+
+ template <typename Functor>
+ function_detail& operator=(eastl::reference_wrapper<Functor> f) EA_NOEXCEPT
+ {
+ Destroy();
+ CreateForwardFunctor(f);
+ return *this;
+ }
+
+ void swap(function_detail& other) EA_NOEXCEPT
+ {
+ if(this == &other)
+ return;
+
+ FunctorStorageType tempStorage;
+ if (other.HaveManager())
+ {
+ (void)(*other.mMgrFuncPtr)(static_cast<void*>(&tempStorage), static_cast<void*>(&other.mStorage),
+ Base::ManagerOperations::MGROPS_MOVE_FUNCTOR);
+ }
+
+ if (HaveManager())
+ {
+ (void)(*mMgrFuncPtr)(static_cast<void*>(&other.mStorage), static_cast<void*>(&mStorage),
+ Base::ManagerOperations::MGROPS_MOVE_FUNCTOR);
+ }
+
+ if (other.HaveManager())
+ {
+ (void)(*other.mMgrFuncPtr)(static_cast<void*>(&mStorage), static_cast<void*>(&tempStorage),
+ Base::ManagerOperations::MGROPS_MOVE_FUNCTOR);
+ }
+
+ eastl::swap(mMgrFuncPtr, other.mMgrFuncPtr);
+ eastl::swap(mInvokeFuncPtr, other.mInvokeFuncPtr);
+ }
+
+ explicit operator bool() const EA_NOEXCEPT
+ {
+ return HaveManager();
+ }
+
+ EASTL_FORCE_INLINE R operator ()(Args... args) const
+ {
+ return (*mInvokeFuncPtr)(eastl::forward<Args>(args)..., this->mStorage);
+ }
+
+ #if EASTL_RTTI_ENABLED
+ const std::type_info& target_type() const EA_NOEXCEPT
+ {
+ if (HaveManager())
+ {
+ void* ret = (*mMgrFuncPtr)(nullptr, nullptr, Base::ManagerOperations::MGROPS_GET_TYPE_INFO);
+ return *(static_cast<const std::type_info*>(ret));
+ }
+ return typeid(void);
+ }
+
+ template <typename Functor>
+ Functor* target() EA_NOEXCEPT
+ {
+ if (HaveManager() && target_type() == typeid(Functor))
+ {
+ void* ret = (*mMgrFuncPtr)(static_cast<void*>(&mStorage), nullptr,
+ Base::ManagerOperations::MGROPS_GET_FUNC_PTR);
+ return ret ? static_cast<Functor*>(ret) : nullptr;
+ }
+ return nullptr;
+ }
+
+ template <typename Functor>
+ const Functor* target() const EA_NOEXCEPT
+ {
+ if (HaveManager() && target_type() == typeid(Functor))
+ {
+ void* ret = (*mMgrFuncPtr)(static_cast<void*>(&mStorage), nullptr,
+ Base::ManagerOperations::MGROPS_GET_FUNC_PTR);
+ return ret ? static_cast<const Functor*>(ret) : nullptr;
+ }
+ return nullptr;
+ }
+ #endif // EASTL_RTTI_ENABLED
+
+ private:
+ bool HaveManager() const EA_NOEXCEPT
+ {
+ return (mMgrFuncPtr != nullptr);
+ }
+
+ void Destroy() EA_NOEXCEPT
+ {
+ if (HaveManager())
+ {
+ (void)(*mMgrFuncPtr)(static_cast<void*>(&mStorage), nullptr,
+ Base::ManagerOperations::MGROPS_DESTRUCT_FUNCTOR);
+ }
+ }
+
+ void Copy(const function_detail& other)
+ {
+ if (other.HaveManager())
+ {
+ (void)(*other.mMgrFuncPtr)(static_cast<void*>(&mStorage),
+ const_cast<void*>(static_cast<const void*>(&other.mStorage)),
+ Base::ManagerOperations::MGROPS_COPY_FUNCTOR);
+ }
+
+ mMgrFuncPtr = other.mMgrFuncPtr;
+ mInvokeFuncPtr = other.mInvokeFuncPtr;
+ }
+
+ void Move(function_detail&& other)
+ {
+ if (other.HaveManager())
+ {
+ (void)(*other.mMgrFuncPtr)(static_cast<void*>(&mStorage), static_cast<void*>(&other.mStorage),
+ Base::ManagerOperations::MGROPS_MOVE_FUNCTOR);
+ }
+
+ mMgrFuncPtr = other.mMgrFuncPtr;
+ mInvokeFuncPtr = other.mInvokeFuncPtr;
+ other.mMgrFuncPtr = nullptr;
+ other.mInvokeFuncPtr = &DefaultInvoker;
+ }
+
+ template <typename Functor>
+ void CreateForwardFunctor(Functor&& functor)
+ {
+ using DecayedFunctorType = typename eastl::decay<Functor>::type;
+ using FunctionManagerType = typename Base::template function_manager<DecayedFunctorType, R, Args...>;
+
+ if (internal::is_null(functor))
+ {
+ mMgrFuncPtr = nullptr;
+ mInvokeFuncPtr = &DefaultInvoker;
+ }
+ else
+ {
+ mMgrFuncPtr = &FunctionManagerType::Manager;
+ mInvokeFuncPtr = &FunctionManagerType::Invoker;
+ FunctionManagerType::CreateFunctor(mStorage, eastl::forward<Functor>(functor));
+ }
+ }
+
+ private:
+ typedef void* (*ManagerFuncPtr)(void*, void*, typename Base::ManagerOperations);
+ typedef R (*InvokeFuncPtr)(Args..., const FunctorStorageType&);
+
+ EA_DISABLE_GCC_WARNING(-Wreturn-type);
+ EA_DISABLE_CLANG_WARNING(-Wreturn-type);
+ EA_DISABLE_VC_WARNING(4716); // 'function' must return a value
+ // We cannot assume that R is default constructible.
+ // This function is called only when the function object CANNOT be called because it is empty,
+ // it will always throw or assert so we never use the return value anyways and neither should the caller.
+ static R DefaultInvoker(Args... /*args*/, const FunctorStorageType& /*functor*/)
+ {
+ #if EASTL_EXCEPTIONS_ENABLED
+ throw eastl::bad_function_call();
+ #else
+ EASTL_ASSERT_MSG(false, "function_detail call on an empty function_detail<R(Args...)>");
+ #endif
+ };
+ EA_RESTORE_VC_WARNING();
+ EA_RESTORE_CLANG_WARNING();
+ EA_RESTORE_GCC_WARNING();
+
+
+ ManagerFuncPtr mMgrFuncPtr = nullptr;
+ InvokeFuncPtr mInvokeFuncPtr = &DefaultInvoker;
+ };
+
+ } // namespace internal
+
+} // namespace eastl
+
+#endif // EASTL_FUNCTION_DETAIL_H
diff --git a/include/EASTL/internal/function_help.h b/include/EASTL/internal/function_help.h
new file mode 100644
index 0000000..04481d3
--- /dev/null
+++ b/include/EASTL/internal/function_help.h
@@ -0,0 +1,51 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_INTERNAL_FUNCTION_HELP_H
+#define EASTL_INTERNAL_FUNCTION_HELP_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+namespace eastl
+{
+ namespace internal
+ {
+
+ //////////////////////////////////////////////////////////////////////
+ // is_null
+ //
+ template <typename T>
+ bool is_null(const T&)
+ {
+ return false;
+ }
+
+ template <typename Result, typename... Arguments>
+ bool is_null(Result (*const& function_pointer)(Arguments...))
+ {
+ return function_pointer == nullptr;
+ }
+
+ template <typename Result, typename Class, typename... Arguments>
+ bool is_null(Result (Class::*const& function_pointer)(Arguments...))
+ {
+ return function_pointer == nullptr;
+ }
+
+ template <typename Result, typename Class, typename... Arguments>
+ bool is_null(Result (Class::*const& function_pointer)(Arguments...) const)
+ {
+ return function_pointer == nullptr;
+ }
+
+ } // namespace internal
+} // namespace eastl
+
+#endif // Header include guard
+
diff --git a/include/EASTL/internal/functional_base.h b/include/EASTL/internal/functional_base.h
new file mode 100644
index 0000000..a7d2dc9
--- /dev/null
+++ b/include/EASTL/internal/functional_base.h
@@ -0,0 +1,389 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_FUNCTIONAL_BASE_H
+#define EASTL_INTERNAL_FUNCTIONAL_BASE_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/memory_base.h>
+#include <EASTL/internal/move_help.h>
+#include <EASTL/type_traits.h>
+
+namespace eastl
+{
+ // forward declaration for swap
+ template <typename T>
+ inline void swap(T& a, T& b)
+ EA_NOEXCEPT_IF(eastl::is_nothrow_move_constructible<T>::value && eastl::is_nothrow_move_assignable<T>::value);
+
+
+ /// invoke
+ ///
+ /// invoke is a generalized function-call operator which works on function pointers, member function
+ /// pointers, callable objects and member pointers.
+ ///
+ /// For (member/non-member) function pointers and callable objects, it returns the result of calling
+ /// the function/object with the specified arguments. For member data pointers, it simply returns
+ /// the member.
+ ///
+ /// Note that there are also reference_wrapper specializations of invoke, which need to be defined
+ /// later since reference_wrapper uses invoke in its implementation. Those are defined immediately
+ /// after the definition of reference_wrapper.
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/functional/invoke
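+ ///
+ /// Example usage (illustrative; 'Widget' is a hypothetical type):
+ ///     struct Widget { int mX; int Add(int y) const { return mX + y; } };
+ ///     Widget w{3};
+ ///     eastl::invoke(&Widget::Add, w, 4);             // calls w.Add(4), yielding 7
+ ///     eastl::invoke(&Widget::mX, w);                 // yields the data member w.mX, i.e. 3
+ ///     eastl::invoke([](int a) { return a * 2; }, 5); // calls the lambda, yielding 10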
+ ///
+ template <typename R, typename C, typename T, typename... Args>
+ auto invoke_impl(R C::*func, T&& obj, Args&&... args) ->
+ typename enable_if<is_base_of<C, decay_t<decltype(obj)>>::value,
+ decltype((eastl::forward<T>(obj).*func)(eastl::forward<Args>(args)...))>::type
+ {
+ return (eastl::forward<T>(obj).*func)(eastl::forward<Args>(args)...);
+ }
+
+ template <typename F, typename... Args>
+ auto invoke_impl(F&& func, Args&&... args) -> decltype(eastl::forward<F>(func)(eastl::forward<Args>(args)...))
+ {
+ return eastl::forward<F>(func)(eastl::forward<Args>(args)...);
+ }
+
+
+ template <typename R, typename C, typename T, typename... Args>
+ auto invoke_impl(R C::*func, T&& obj, Args&&... args) -> decltype(((*eastl::forward<T>(obj)).*func)(eastl::forward<Args>(args)...))
+ {
+ return ((*eastl::forward<T>(obj)).*func)(eastl::forward<Args>(args)...);
+ }
+
+ template <typename M, typename C, typename T>
+ auto invoke_impl(M C::*member, T&& obj) ->
+ typename enable_if<
+ is_base_of<C, decay_t<decltype(obj)>>::value,
+ decltype(obj.*member)
+ >::type
+ {
+ return obj.*member;
+ }
+
+ template <typename M, typename C, typename T>
+ auto invoke_impl(M C::*member, T&& obj) -> decltype((*eastl::forward<T>(obj)).*member)
+ {
+ return (*eastl::forward<T>(obj)).*member;
+ }
+
+ template <typename F, typename... Args>
+ inline decltype(auto) invoke(F&& func, Args&&... args)
+ {
+ return invoke_impl(eastl::forward<F>(func), eastl::forward<Args>(args)...);
+ }
+
+ template <typename F, typename = void, typename... Args>
+ struct invoke_result_impl {
+ };
+
+ template <typename F, typename... Args>
+ struct invoke_result_impl<F, void_t<decltype(invoke_impl(eastl::declval<decay_t<F>>(), eastl::declval<Args>()...))>, Args...>
+ {
+ typedef decltype(invoke_impl(eastl::declval<decay_t<F>>(), eastl::declval<Args>()...)) type;
+ };
+
+ template <typename F, typename... Args>
+ struct invoke_result : public invoke_result_impl<F, void, Args...> {};
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename F, typename... Args>
+ using invoke_result_t = typename invoke_result<F, Args...>::type;
+ #endif
+
+ template <typename F, typename = void, typename... Args>
+ struct is_invocable_impl : public eastl::false_type {};
+
+ template <typename F, typename... Args>
+ struct is_invocable_impl<F, void_t<typename eastl::invoke_result<F, Args...>::type>, Args...> : public eastl::true_type {};
+
+ template <typename F, typename... Args>
+ struct is_invocable : public is_invocable_impl<F, void, Args...> {};
+
+ template <typename R, typename F, typename = void, typename... Args>
+ struct is_invocable_r_impl : public eastl::false_type {};
+
+ template <typename R, typename F, typename... Args>
+ struct is_invocable_r_impl<R, F, void_t<typename invoke_result<F, Args...>::type>, Args...>
+ : public is_convertible<typename invoke_result<F, Args...>::type, R> {};
+
+ template <typename R, typename F, typename... Args>
+ struct is_invocable_r : public is_invocable_r_impl<R, F, void, Args...> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <typename F, typename... Args>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_invocable_v = is_invocable<F, Args...>::value;
+
+ template <typename R, typename F, typename... Args>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_invocable_r_v = is_invocable_r<R, F, Args...>::value;
+ #endif
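+
+	// Example usage of the invoke-related traits (an illustrative sketch assuming
+	// EASTL_VARIABLE_TEMPLATES_ENABLED; Functor is a hypothetical callable, not part of EASTL):
+	//     struct Functor { double operator()(int) const; };
+	//     static_assert(eastl::is_same_v<eastl::invoke_result_t<Functor, int>, double>, "");
+	//     static_assert(eastl::is_invocable_v<Functor, int>, "");
+	//     static_assert(eastl::is_invocable_r_v<float, Functor, int>, ""); // double converts to float
+	//     static_assert(!eastl::is_invocable_v<Functor, int*>, "");        // no viable call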
+
+ /// allocator_arg_t
+ ///
+ /// allocator_arg_t is an empty class type used to disambiguate the overloads of
+ /// constructors and member functions of allocator-aware objects, including tuple,
+ /// function, promise, and packaged_task.
+ /// http://en.cppreference.com/w/cpp/memory/allocator_arg_t
+ ///
+ struct allocator_arg_t
+ {};
+
+
+ /// allocator_arg
+ ///
+ /// allocator_arg is a constant of type allocator_arg_t used to disambiguate, at call site,
+ /// the overloads of the constructors and member functions of allocator-aware objects,
+ /// such as tuple, function, promise, and packaged_task.
+ /// http://en.cppreference.com/w/cpp/memory/allocator_arg
+ ///
+ #if !defined(EA_COMPILER_NO_CONSTEXPR)
+ EA_CONSTEXPR allocator_arg_t allocator_arg = allocator_arg_t();
+ #endif
+
+
+ template <typename Argument, typename Result>
+ struct unary_function
+ {
+ typedef Argument argument_type;
+ typedef Result result_type;
+ };
+
+
+ template <typename Argument1, typename Argument2, typename Result>
+ struct binary_function
+ {
+ typedef Argument1 first_argument_type;
+ typedef Argument2 second_argument_type;
+ typedef Result result_type;
+ };
+
+
+ /// less<T>
+ template <typename T = void>
+ struct less : public binary_function<T, T, bool>
+ {
+ EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const
+ { return a < b; }
+ };
+
+ // http://en.cppreference.com/w/cpp/utility/functional/less_void
+ template <>
+ struct less<void>
+ {
+ template<typename A, typename B>
+ EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const
+ -> decltype(eastl::forward<A>(a) < eastl::forward<B>(b))
+ { return eastl::forward<A>(a) < eastl::forward<B>(b); }
+ };
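+
+	// Example usage of the transparent less<void> specialization (an illustrative sketch):
+	//     eastl::less<int>  lessInt;  lessInt(1, 2);    // compares two ints
+	//     eastl::less<void> lessAny;  lessAny(1, 2.5);  // compares heterogeneous types by
+	//                                                   // forwarding both operands to operator<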
+
+
+ /// reference_wrapper
+ template <typename T>
+ class reference_wrapper
+ {
+ public:
+ typedef T type;
+
+ reference_wrapper(T&) EA_NOEXCEPT;
+ reference_wrapper(T&&) = delete;
+ reference_wrapper(const reference_wrapper<T>& x) EA_NOEXCEPT;
+
+ reference_wrapper& operator=(const reference_wrapper<T>& x) EA_NOEXCEPT;
+
+ operator T& () const EA_NOEXCEPT;
+ T& get() const EA_NOEXCEPT;
+
+ template <typename... ArgTypes>
+ typename eastl::result_of<T&(ArgTypes&&...)>::type operator() (ArgTypes&&...) const;
+
+ private:
+ T* val;
+ };
+
+ template <typename T>
+ reference_wrapper<T>::reference_wrapper(T &v) EA_NOEXCEPT
+ : val(eastl::addressof(v))
+ {}
+
+ template <typename T>
+ reference_wrapper<T>::reference_wrapper(const reference_wrapper<T>& other) EA_NOEXCEPT
+ : val(other.val)
+ {}
+
+ template <typename T>
+ reference_wrapper<T>& reference_wrapper<T>::operator=(const reference_wrapper<T>& other) EA_NOEXCEPT
+ {
+ val = other.val;
+ return *this;
+ }
+
+ template <typename T>
+ reference_wrapper<T>::operator T&() const EA_NOEXCEPT
+ {
+ return *val;
+ }
+
+ template <typename T>
+ T& reference_wrapper<T>::get() const EA_NOEXCEPT
+ {
+ return *val;
+ }
+
+ template <typename T>
+ template <typename... ArgTypes>
+ typename eastl::result_of<T&(ArgTypes&&...)>::type reference_wrapper<T>::operator() (ArgTypes&&... args) const
+ {
+ return eastl::invoke(*val, eastl::forward<ArgTypes>(args)...);
+ }
+
+	// reference_wrapper-specific utilities
+ template <typename T>
+ reference_wrapper<T> ref(T& t) EA_NOEXCEPT
+ {
+ return eastl::reference_wrapper<T>(t);
+ }
+
+ template <typename T>
+ void ref(const T&&) = delete;
+
+ template <typename T>
+	reference_wrapper<T> ref(reference_wrapper<T> t) EA_NOEXCEPT
+ {
+ return eastl::ref(t.get());
+ }
+
+ template <typename T>
+ reference_wrapper<const T> cref(const T& t) EA_NOEXCEPT
+ {
+ return eastl::reference_wrapper<const T>(t);
+ }
+
+ template <typename T>
+ void cref(const T&&) = delete;
+
+ template <typename T>
+ reference_wrapper<const T> cref(reference_wrapper<T> t) EA_NOEXCEPT
+ {
+ return eastl::cref(t.get());
+ }
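+
+	// Example usage of ref/cref (an illustrative sketch; 'value' is a hypothetical variable
+	// used only for illustration):
+	//     int value = 0;
+	//     eastl::reference_wrapper<int> r = eastl::ref(value);          // r.get() aliases 'value'
+	//     eastl::reference_wrapper<const int> cr = eastl::cref(value);  // read-only view of 'value'
+	//     r.get() = 5;                                                  // writes through to 'value'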
+
+
+ // reference_wrapper-specific type traits
+ template <typename T>
+ struct is_reference_wrapper_helper
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_reference_wrapper_helper<eastl::reference_wrapper<T> >
+ : public eastl::true_type {};
+
+ template <typename T>
+ struct is_reference_wrapper
+ : public eastl::is_reference_wrapper_helper<typename eastl::remove_cv<T>::type> {};
+
+
+ // Helper which adds a reference to a type when given a reference_wrapper of that type.
+ template <typename T>
+ struct remove_reference_wrapper
+ { typedef T type; };
+
+ template <typename T>
+ struct remove_reference_wrapper< eastl::reference_wrapper<T> >
+ { typedef T& type; };
+
+ template <typename T>
+ struct remove_reference_wrapper< const eastl::reference_wrapper<T> >
+ { typedef T& type; };
+
+ // reference_wrapper specializations of invoke
+ // These have to come after reference_wrapper is defined, but reference_wrapper needs to have a
+ // definition of invoke, so these specializations need to come after everything else has been defined.
+ template <typename R, typename C, typename T, typename... Args>
+ auto invoke_impl(R (C::*func)(Args...), T&& obj, Args&&... args) ->
+ typename enable_if<is_reference_wrapper<typename remove_reference<T>::type>::value,
+ decltype((obj.get().*func)(eastl::forward<Args>(args)...))>::type
+ {
+ return (obj.get().*func)(eastl::forward<Args>(args)...);
+ }
+
+ template <typename M, typename C, typename T>
+ auto invoke_impl(M(C::*member), T&& obj) ->
+ typename enable_if<is_reference_wrapper<typename remove_reference<T>::type>::value,
+ decltype(obj.get().*member)>::type
+ {
+ return obj.get().*member;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // bind
+ ///////////////////////////////////////////////////////////////////////
+
+ /// bind1st
+ ///
+ template <typename Operation>
+ class binder1st : public unary_function<typename Operation::second_argument_type, typename Operation::result_type>
+ {
+ protected:
+ typename Operation::first_argument_type value;
+ Operation op;
+
+ public:
+ binder1st(const Operation& x, const typename Operation::first_argument_type& y)
+ : value(y), op(x) { }
+
+ typename Operation::result_type operator()(const typename Operation::second_argument_type& x) const
+ { return op(value, x); }
+
+ typename Operation::result_type operator()(typename Operation::second_argument_type& x) const
+ { return op(value, x); }
+ };
+
+
+ template <typename Operation, typename T>
+ inline binder1st<Operation> bind1st(const Operation& op, const T& x)
+ {
+ typedef typename Operation::first_argument_type value;
+ return binder1st<Operation>(op, value(x));
+ }
+
+
+ /// bind2nd
+ ///
+ template <typename Operation>
+ class binder2nd : public unary_function<typename Operation::first_argument_type, typename Operation::result_type>
+ {
+ protected:
+ Operation op;
+ typename Operation::second_argument_type value;
+
+ public:
+ binder2nd(const Operation& x, const typename Operation::second_argument_type& y)
+ : op(x), value(y) { }
+
+ typename Operation::result_type operator()(const typename Operation::first_argument_type& x) const
+ { return op(x, value); }
+
+ typename Operation::result_type operator()(typename Operation::first_argument_type& x) const
+ { return op(x, value); }
+ };
+
+
+ template <typename Operation, typename T>
+ inline binder2nd<Operation> bind2nd(const Operation& op, const T& x)
+ {
+ typedef typename Operation::second_argument_type value;
+ return binder2nd<Operation>(op, value(x));
+ }
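+
+	// Example usage of the legacy bind1st/bind2nd binders (an illustrative sketch):
+	//     eastl::less<int> comp;
+	//     eastl::bind2nd(comp, 10)(5);   // evaluates comp(5, 10)  -> true
+	//     eastl::bind1st(comp, 10)(5);   // evaluates comp(10, 5)  -> false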
+
+} // namespace eastl
+
+#endif // Header include guard
diff --git a/include/EASTL/internal/generic_iterator.h b/include/EASTL/internal/generic_iterator.h
new file mode 100644
index 0000000..b32998a
--- /dev/null
+++ b/include/EASTL/internal/generic_iterator.h
@@ -0,0 +1,208 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Implements a generic iterator from a given iterable type, such as a pointer.
+// We cannot put this file into our own iterator.h file because we need to
+// still be able to use this file when we have our iterator.h disabled.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_GENERIC_ITERATOR_H
+#define EASTL_INTERNAL_GENERIC_ITERATOR_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/type_traits.h>
+
+// There is no warning number 'number'.
+// Member template functions cannot be used for copy-assignment or copy-construction.
+EA_DISABLE_VC_WARNING(4619 4217);
+
+
+namespace eastl
+{
+
+ /// generic_iterator
+ ///
+ /// Converts something which can be iterated into a formal iterator.
+ /// While this class' primary purpose is to allow the conversion of
+ /// a pointer to an iterator, you can convert anything else to an
+ /// iterator by defining an iterator_traits<> specialization for that
+ /// object type. See EASTL iterator.h for this.
+ ///
+ /// Example usage:
+ /// typedef generic_iterator<int*> IntArrayIterator;
+ /// typedef generic_iterator<int*, char> IntArrayIteratorOther;
+ ///
+ template <typename Iterator, typename Container = void>
+ class generic_iterator
+ {
+ protected:
+ Iterator mIterator;
+
+ public:
+ typedef typename eastl::iterator_traits<Iterator>::iterator_category iterator_category;
+ typedef typename eastl::iterator_traits<Iterator>::value_type value_type;
+ typedef typename eastl::iterator_traits<Iterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<Iterator>::reference reference;
+ typedef typename eastl::iterator_traits<Iterator>::pointer pointer;
+ typedef Iterator iterator_type;
+		typedef iterator_type wrapped_iterator_type; // This is not in the C++ Standard; it's used by us to identify it as a wrapping iterator type.
+ typedef Container container_type;
+ typedef generic_iterator<Iterator, Container> this_type;
+
+ generic_iterator()
+ : mIterator(iterator_type()) { }
+
+ explicit generic_iterator(const iterator_type& x)
+ : mIterator(x) { }
+
+ this_type& operator=(const iterator_type& x)
+ { mIterator = x; return *this; }
+
+ template <typename Iterator2>
+ generic_iterator(const generic_iterator<Iterator2, Container>& x)
+ : mIterator(x.base()) { }
+
+ reference operator*() const
+ { return *mIterator; }
+
+ pointer operator->() const
+ { return mIterator; }
+
+ this_type& operator++()
+ { ++mIterator; return *this; }
+
+ this_type operator++(int)
+ { return this_type(mIterator++); }
+
+ this_type& operator--()
+ { --mIterator; return *this; }
+
+ this_type operator--(int)
+ { return this_type(mIterator--); }
+
+ reference operator[](const difference_type& n) const
+ { return mIterator[n]; }
+
+ this_type& operator+=(const difference_type& n)
+ { mIterator += n; return *this; }
+
+ this_type operator+(const difference_type& n) const
+ { return this_type(mIterator + n); }
+
+ this_type& operator-=(const difference_type& n)
+ { mIterator -= n; return *this; }
+
+ this_type operator-(const difference_type& n) const
+ { return this_type(mIterator - n); }
+
+ const iterator_type& base() const
+ { return mIterator; }
+
+ }; // class generic_iterator
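+
+	// Example usage of generic_iterator as a pointer wrapper (an illustrative sketch; 'buffer'
+	// is a hypothetical array, and eastl::sort requires including <EASTL/sort.h>):
+	//     int buffer[4] = { 3, 1, 4, 1 };
+	//     eastl::generic_iterator<int*> first(buffer), last(buffer + 4);
+	//     eastl::sort(first, last);  // usable wherever a conforming random-access iterator is expected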
+
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline bool operator==(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() == rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline bool operator==(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+ { return lhs.base() == rhs.base(); }
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline bool operator!=(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() != rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline bool operator!=(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+ { return lhs.base() != rhs.base(); }
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline bool operator<(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() < rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline bool operator<(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+ { return lhs.base() < rhs.base(); }
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline bool operator>(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() > rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline bool operator>(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+ { return lhs.base() > rhs.base(); }
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline bool operator<=(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() <= rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline bool operator<=(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+ { return lhs.base() <= rhs.base(); }
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline bool operator>=(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() >= rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline bool operator>=(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+ { return lhs.base() >= rhs.base(); }
+
+ template <typename IteratorL, typename IteratorR, typename Container>
+ inline typename generic_iterator<IteratorL, Container>::difference_type
+ operator-(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+ { return lhs.base() - rhs.base(); }
+
+ template <typename Iterator, typename Container>
+ inline generic_iterator<Iterator, Container>
+ operator+(typename generic_iterator<Iterator, Container>::difference_type n, const generic_iterator<Iterator, Container>& x)
+ { return generic_iterator<Iterator, Container>(x.base() + n); }
+
+
+
+ /// is_generic_iterator
+ ///
+ /// Tells if an iterator is one of these generic_iterators. This is useful if you want to
+ /// write code that uses miscellaneous iterators but wants to tell if they are generic_iterators.
+ /// A primary reason to do so is that you can get at the pointer within the generic_iterator.
+ ///
+ template <typename Iterator>
+ struct is_generic_iterator : public false_type { };
+
+ template <typename Iterator, typename Container>
+ struct is_generic_iterator<generic_iterator<Iterator, Container> > : public true_type { };
+
+
+ /// unwrap_generic_iterator
+ ///
+ /// Returns Iterator::get_base() if it's a generic_iterator, else returns Iterator as-is.
+ ///
+ /// Example usage:
+ /// vector<int> intVector;
+ /// eastl::generic_iterator<vector<int>::iterator> genericIterator(intVector.begin());
+ /// vector<int>::iterator it = unwrap_generic_iterator(genericIterator);
+ ///
+ template <typename Iterator>
+ inline typename eastl::is_iterator_wrapper_helper<Iterator, eastl::is_generic_iterator<Iterator>::value>::iterator_type unwrap_generic_iterator(Iterator it)
+ { return eastl::is_iterator_wrapper_helper<Iterator, eastl::is_generic_iterator<Iterator>::value>::get_base(it); }
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/include/EASTL/internal/hashtable.h b/include/EASTL/internal/hashtable.h
new file mode 100644
index 0000000..bb6d27e
--- /dev/null
+++ b/include/EASTL/internal/hashtable.h
@@ -0,0 +1,3222 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a hashtable, much like the C++11 unordered_set/unordered_map
+// proposed classes.
+// The primary distinctions between this hashtable and C++11 unordered containers are:
+// - hashtable is savvy to an environment that doesn't have exception handling,
+// as is sometimes the case with console or embedded environments.
+// - hashtable is slightly more space-efficient than a conventional std hashtable
+// implementation on platforms with 64 bit size_t. This is
+//      because std STL uses size_t (64 bits) in data structures where 32 bits
+//      of data would suffice.
+// - hashtable can contain objects with alignment requirements. TR1 hash tables
+// cannot do so without a bit of tedious non-portable effort.
+// - hashtable supports debug memory naming natively.
+// - hashtable provides a find function that lets you specify a type that is
+// different from the hash table key type. This is particularly useful for
+// the storing of string objects but finding them by char pointers.
+// - hashtable provides a lower level insert function which lets the caller
+// specify the hash code and optionally the node instance.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_HASHTABLE_H
+#define EASTL_INTERNAL_HASHTABLE_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/allocator.h>
+#include <EASTL/iterator.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/initializer_list.h>
+#include <EASTL/tuple.h>
+#include <string.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+ #include <new>
+ #include <stddef.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+// 4512 - 'class' : assignment operator could not be generated.
+// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught.
+EA_DISABLE_VC_WARNING(4512 4530 4571);
+
+
+namespace eastl
+{
+
+ /// EASTL_HASHTABLE_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_HASHTABLE_DEFAULT_NAME
+ #define EASTL_HASHTABLE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hashtable" // Unless the user overrides something, this is "EASTL hashtable".
+ #endif
+
+
+ /// EASTL_HASHTABLE_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_HASHTABLE_DEFAULT_ALLOCATOR
+ #define EASTL_HASHTABLE_DEFAULT_ALLOCATOR allocator_type(EASTL_HASHTABLE_DEFAULT_NAME)
+ #endif
+
+
+ /// kHashtableAllocFlagBuckets
+ /// Flag to allocator which indicates that we are allocating buckets and not nodes.
+ enum { kHashtableAllocFlagBuckets = 0x00400000 };
+
+
+ /// gpEmptyBucketArray
+ ///
+ /// A shared representation of an empty hash table. This is present so that
+ /// a new empty hashtable allocates no memory. It has two entries, one for
+ /// the first lone empty (NULL) bucket, and one for the non-NULL trailing sentinel.
+ ///
+ extern EASTL_API void* gpEmptyBucketArray[2];
+
+
+ /// EASTL_MACRO_SWAP
+ ///
+ /// Use EASTL_MACRO_SWAP because GCC (at least v4.6-4.8) has a bug where it fails to compile eastl::swap(mpBucketArray, x.mpBucketArray).
+ ///
+ #define EASTL_MACRO_SWAP(Type, a, b) \
+ { Type temp = a; a = b; b = temp; }
+
+
+ /// hash_node
+ ///
+ /// A hash_node stores an element in a hash table, much like a
+ /// linked list node stores an element in a linked list.
+ /// A hash_node additionally can, via template parameter,
+ /// store a hash code in the node to speed up hash calculations
+ /// and comparisons in some cases.
+ ///
+ template <typename Value, bool bCacheHashCode>
+ struct hash_node;
+
+ EA_DISABLE_VC_WARNING(4625 4626) // "copy constructor / assignment operator could not be generated because a base class copy constructor is inaccessible or deleted"
+ #ifdef EA_COMPILER_MSVC_2015
+ EA_DISABLE_VC_WARNING(5026) // disable warning: "move constructor was implicitly defined as deleted"
+ #endif
+ template <typename Value>
+ struct hash_node<Value, true>
+ {
+ hash_node() = default;
+ hash_node(const hash_node&) = default;
+ hash_node(hash_node&&) = default;
+
+ Value mValue;
+ hash_node* mpNext;
+ eastl_size_t mnHashCode; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ } EASTL_MAY_ALIAS;
+
+ template <typename Value>
+ struct hash_node<Value, false>
+ {
+ hash_node() = default;
+ hash_node(const hash_node&) = default;
+ hash_node(hash_node&&) = default;
+
+ Value mValue;
+ hash_node* mpNext;
+ } EASTL_MAY_ALIAS;
+
+ #ifdef EA_COMPILER_MSVC_2015
+ EA_RESTORE_VC_WARNING()
+ #endif
+ EA_RESTORE_VC_WARNING()
+
+
+ // has_hashcode_member
+ //
+ // Custom type-trait that checks for the existence of a class data member 'mnHashCode'.
+ //
+ // In order to explicitly instantiate the hashtable without error we need to SFINAE away the functions that will
+	// fail to compile based on whether the 'hash_node' contains an 'mnHashCode' member, which is dictated by the hashtable
+	// template parameters. The hashtable supports this level of configuration to allow users to choose between the space vs.
+	// time optimization.
+ //
+ namespace Internal
+ {
+ template <class T>
+ struct has_hashcode_member
+ {
+ private:
+ template <class U> static eastl::no_type test(...);
+ template <class U> static eastl::yes_type test(decltype(U::mnHashCode)* = 0);
+ public:
+ static const bool value = sizeof(test<T>(0)) == sizeof(eastl::yes_type);
+ };
+ }
+
+ static_assert(Internal::has_hashcode_member<hash_node<int, true>>::value, "contains a mnHashCode member");
+ static_assert(!Internal::has_hashcode_member<hash_node<int, false>>::value, "doesn't contain a mnHashCode member");
+
+ // convenience macros to increase the readability of the code paths that must SFINAE on if the 'hash_node'
+ // contains the cached hashed value or not.
+ #define ENABLE_IF_HAS_HASHCODE(T, RT) typename eastl::enable_if<Internal::has_hashcode_member<T>::value, RT>::type*
+ #define ENABLE_IF_HASHCODE_EASTLSIZET(T, RT) typename eastl::enable_if<eastl::is_convertible<T, eastl_size_t>::value, RT>::type
+ #define ENABLE_IF_TRUETYPE(T) typename eastl::enable_if<T::value>::type*
+ #define DISABLE_IF_TRUETYPE(T) typename eastl::enable_if<!T::value>::type*
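+
+	// Example of how these convenience macros are typically applied (an illustrative sketch;
+	// DoExample is a hypothetical function, not part of the hashtable API):
+	//     template <typename NodeType>
+	//     void DoExample(NodeType* pNode, ENABLE_IF_HAS_HASHCODE(NodeType, void) = nullptr)
+	//     { /* participates in overload resolution only when NodeType caches mnHashCode */ }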
+
+
+ /// node_iterator_base
+ ///
+ /// Node iterators iterate nodes within a given bucket.
+ ///
+ /// We define a base class here because it is shared by both const and
+ /// non-const iterators.
+ ///
+ template <typename Value, bool bCacheHashCode>
+ struct node_iterator_base
+ {
+ typedef hash_node<Value, bCacheHashCode> node_type;
+
+ node_type* mpNode;
+
+ node_iterator_base(node_type* pNode)
+ : mpNode(pNode) { }
+
+ void increment()
+ { mpNode = mpNode->mpNext; }
+ };
+
+
+
+ /// node_iterator
+ ///
+ /// Node iterators iterate nodes within a given bucket.
+ ///
+ /// The bConst parameter defines if the iterator is a const_iterator
+ /// or an iterator.
+ ///
+ template <typename Value, bool bConst, bool bCacheHashCode>
+ struct node_iterator : public node_iterator_base<Value, bCacheHashCode>
+ {
+ public:
+ typedef node_iterator_base<Value, bCacheHashCode> base_type;
+ typedef node_iterator<Value, bConst, bCacheHashCode> this_type;
+ typedef typename base_type::node_type node_type;
+ typedef Value value_type;
+ typedef typename type_select<bConst, const Value*, Value*>::type pointer;
+ typedef typename type_select<bConst, const Value&, Value&>::type reference;
+ typedef ptrdiff_t difference_type;
+ typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
+
+ public:
+ explicit node_iterator(node_type* pNode = NULL)
+ : base_type(pNode) { }
+
+ node_iterator(const node_iterator<Value, true, bCacheHashCode>& x)
+ : base_type(x.mpNode) { }
+
+ reference operator*() const
+ { return base_type::mpNode->mValue; }
+
+ pointer operator->() const
+ { return &(base_type::mpNode->mValue); }
+
+ node_iterator& operator++()
+ { base_type::increment(); return *this; }
+
+ node_iterator operator++(int)
+ { node_iterator temp(*this); base_type::increment(); return temp; }
+
+ }; // node_iterator
+
+
+
+ /// hashtable_iterator_base
+ ///
+ /// A hashtable_iterator iterates the entire hash table and not just
+ /// nodes within a single bucket. Users in general will use a hash
+ /// table iterator much more often, as it is much like other container
+ /// iterators (e.g. vector::iterator).
+ ///
+ /// We define a base class here because it is shared by both const and
+ /// non-const iterators.
+ ///
+ template <typename Value, bool bCacheHashCode>
+ struct hashtable_iterator_base
+ {
+ public:
+ typedef hashtable_iterator_base<Value, bCacheHashCode> this_type;
+ typedef hash_node<Value, bCacheHashCode> node_type;
+
+ protected:
+ template <typename, typename, typename, typename, typename, typename, typename, typename, typename, bool, bool, bool>
+ friend class hashtable;
+
+ template <typename, bool, bool>
+ friend struct hashtable_iterator;
+
+ template <typename V, bool b>
+ friend bool operator==(const hashtable_iterator_base<V, b>&, const hashtable_iterator_base<V, b>&);
+
+ template <typename V, bool b>
+ friend bool operator!=(const hashtable_iterator_base<V, b>&, const hashtable_iterator_base<V, b>&);
+
+ node_type* mpNode; // Current node within current bucket.
+ node_type** mpBucket; // Current bucket.
+
+ public:
+ hashtable_iterator_base(node_type* pNode, node_type** pBucket)
+ : mpNode(pNode), mpBucket(pBucket) { }
+
+ void increment_bucket()
+ {
+ ++mpBucket;
+ while(*mpBucket == NULL) // We store an extra bucket with some non-NULL value at the end
+ ++mpBucket; // of the bucket array so that finding the end of the bucket
+ mpNode = *mpBucket; // array is quick and simple.
+ }
+
+ void increment()
+ {
+ mpNode = mpNode->mpNext;
+
+ while(mpNode == NULL)
+ mpNode = *++mpBucket;
+ }
+
+ }; // hashtable_iterator_base
+
+
+
+
+ /// hashtable_iterator
+ ///
+ /// A hashtable_iterator iterates the entire hash table and not just
+ /// nodes within a single bucket. Users in general will use a hash
+ /// table iterator much more often, as it is much like other container
+ /// iterators (e.g. vector::iterator).
+ ///
+ /// The bConst parameter defines if the iterator is a const_iterator
+ /// or an iterator.
+ ///
+ template <typename Value, bool bConst, bool bCacheHashCode>
+ struct hashtable_iterator : public hashtable_iterator_base<Value, bCacheHashCode>
+ {
+ public:
+ typedef hashtable_iterator_base<Value, bCacheHashCode> base_type;
+ typedef hashtable_iterator<Value, bConst, bCacheHashCode> this_type;
+ typedef hashtable_iterator<Value, false, bCacheHashCode> this_type_non_const;
+ typedef typename base_type::node_type node_type;
+ typedef Value value_type;
+ typedef typename type_select<bConst, const Value*, Value*>::type pointer;
+ typedef typename type_select<bConst, const Value&, Value&>::type reference;
+ typedef ptrdiff_t difference_type;
+ typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
+
+ public:
+ hashtable_iterator(node_type* pNode = NULL, node_type** pBucket = NULL)
+ : base_type(pNode, pBucket) { }
+
+ hashtable_iterator(node_type** pBucket)
+ : base_type(*pBucket, pBucket) { }
+
+ hashtable_iterator(const this_type_non_const& x)
+ : base_type(x.mpNode, x.mpBucket) { }
+
+ reference operator*() const
+ { return base_type::mpNode->mValue; }
+
+ pointer operator->() const
+ { return &(base_type::mpNode->mValue); }
+
+ hashtable_iterator& operator++()
+ { base_type::increment(); return *this; }
+
+ hashtable_iterator operator++(int)
+ { hashtable_iterator temp(*this); base_type::increment(); return temp; }
+
+ const node_type* get_node() const
+ { return base_type::mpNode; }
+
+ }; // hashtable_iterator
+
+
+
+
+ /// ht_distance
+ ///
+ /// This function returns the same thing as distance() for
+ /// forward iterators but returns zero for input iterators.
+ /// The reason why is that input iterators can only be read
+ /// once, and calling distance() on an input iterator destroys
+ /// the ability to read it. This ht_distance is used only for
+ /// optimization and so the code will merely work better with
+	/// forward iterators than with input iterators.
+ ///
+ template <typename Iterator>
+ inline typename eastl::iterator_traits<Iterator>::difference_type
+ distance_fw_impl(Iterator /*first*/, Iterator /*last*/, EASTL_ITC_NS::input_iterator_tag)
+ {
+ return 0;
+ }
+
+ template <typename Iterator>
+ inline typename eastl::iterator_traits<Iterator>::difference_type
+ distance_fw_impl(Iterator first, Iterator last, EASTL_ITC_NS::forward_iterator_tag)
+ { return eastl::distance(first, last); }
+
+ template <typename Iterator>
+ inline typename eastl::iterator_traits<Iterator>::difference_type
+ ht_distance(Iterator first, Iterator last)
+ {
+ typedef typename eastl::iterator_traits<Iterator>::iterator_category IC;
+ return distance_fw_impl(first, last, IC());
+ }
+
+
+
+
+ /// mod_range_hashing
+ ///
+ /// Implements the algorithm for conversion of a number in the range of
+ /// [0, SIZE_T_MAX] to the range of [0, BucketCount).
+ ///
+ struct mod_range_hashing
+ {
+ uint32_t operator()(size_t r, uint32_t n) const
+ { return r % n; }
+ };
+
+
+ /// default_ranged_hash
+ ///
+ /// Default ranged hash function H. In principle it should be a
+ /// function object composed from objects of type H1 and H2 such that
+ /// h(k, n) = h2(h1(k), n), but that would mean making extra copies of
+ /// h1 and h2. So instead we'll just use a tag to tell class template
+ /// hashtable to do that composition.
+ ///
+ struct default_ranged_hash{ };
+
+
+ /// prime_rehash_policy
+ ///
+ /// Default value for rehash policy. Bucket size is (usually) the
+ /// smallest prime that keeps the load factor small enough.
+ ///
+ struct EASTL_API prime_rehash_policy
+ {
+ public:
+ float mfMaxLoadFactor;
+ float mfGrowthFactor;
+ mutable uint32_t mnNextResize;
+
+ public:
+ prime_rehash_policy(float fMaxLoadFactor = 1.f)
+ : mfMaxLoadFactor(fMaxLoadFactor), mfGrowthFactor(2.f), mnNextResize(0) { }
+
+ float GetMaxLoadFactor() const
+ { return mfMaxLoadFactor; }
+
+		/// Return a bucket count no greater than nBucketCountHint.
+		/// Unlike GetPrevBucketCount, this does not update any member variables.
+ static uint32_t GetPrevBucketCountOnly(uint32_t nBucketCountHint);
+
+ /// Return a bucket count no greater than nBucketCountHint.
+ /// This function has a side effect of updating mnNextResize.
+ uint32_t GetPrevBucketCount(uint32_t nBucketCountHint) const;
+
+ /// Return a bucket count no smaller than nBucketCountHint.
+ /// This function has a side effect of updating mnNextResize.
+ uint32_t GetNextBucketCount(uint32_t nBucketCountHint) const;
+
+ /// Return a bucket count appropriate for nElementCount elements.
+ /// This function has a side effect of updating mnNextResize.
+ uint32_t GetBucketCount(uint32_t nElementCount) const;
+
+ /// nBucketCount is current bucket count, nElementCount is current element count,
+ /// and nElementAdd is number of elements to be inserted. Do we need
+ /// to increase bucket count? If so, return pair(true, n), where
+ /// n is the new bucket count. If not, return pair(false, 0).
+ eastl::pair<bool, uint32_t>
+ GetRehashRequired(uint32_t nBucketCount, uint32_t nElementCount, uint32_t nElementAdd) const;
+ };
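+
+	// Example usage of prime_rehash_policy (an illustrative sketch; the variable names are
+	// hypothetical):
+	//     prime_rehash_policy policy(1.0f);                // max load factor of 1.0
+	//     uint32_t nBuckets = policy.GetBucketCount(100);  // prime bucket count suited to ~100 elements
+	//     eastl::pair<bool, uint32_t> rehash = policy.GetRehashRequired(nBuckets, 100, 50);
+	//     if(rehash.first)
+	//         { /* grow the bucket array to rehash.second buckets */ }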
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // Base classes for hashtable. We define these base classes because
+ // in some cases we want to do different things depending on the
+ // value of a policy class. In some cases the policy class affects
+ // which member functions and nested typedefs are defined; we handle that
+ // by specializing base class templates. Several of the base class templates
+ // need to access other members of class template hashtable, so we use
+ // the "curiously recurring template pattern" (parent class is templated
+ // on type of child class) for them.
+ ///////////////////////////////////////////////////////////////////////
+
+
+ /// rehash_base
+ ///
+ /// Give hashtable the get_max_load_factor functions if the rehash
+ /// policy is prime_rehash_policy.
+ ///
+ template <typename RehashPolicy, typename Hashtable>
+ struct rehash_base { };
+
+ template <typename Hashtable>
+ struct rehash_base<prime_rehash_policy, Hashtable>
+ {
+ // Returns the max load factor, which is the load factor beyond
+ // which we rebuild the container with a new bucket count.
+ float get_max_load_factor() const
+ {
+ const Hashtable* const pThis = static_cast<const Hashtable*>(this);
+ return pThis->rehash_policy().GetMaxLoadFactor();
+ }
+
+ // If you want to make the hashtable never rehash (resize),
+ // set the max load factor to be a very high number (e.g. 100000.f).
+ void set_max_load_factor(float fMaxLoadFactor)
+ {
+ Hashtable* const pThis = static_cast<Hashtable*>(this);
+ pThis->rehash_policy(prime_rehash_policy(fMaxLoadFactor));
+ }
+ };
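+
+	// Example usage (an illustrative sketch; hash_set is one of the hashtable-derived
+	// containers that inherits these functions through rehash_base):
+	//     eastl::hash_set<int> intSet;
+	//     intSet.set_max_load_factor(100000.f);        // effectively disables rehashing
+	//     float fLoad = intSet.get_max_load_factor();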
+
+
+
+
+ /// hash_code_base
+ ///
+ /// Encapsulates two policy issues that aren't quite orthogonal.
+ /// (1) The difference between using a ranged hash function and using
+ /// the combination of a hash function and a range-hashing function.
+ /// In the former case we don't have such things as hash codes, so
+ /// we have a dummy type as placeholder.
+ /// (2) Whether or not we cache hash codes. Caching hash codes is
+ /// meaningless if we have a ranged hash function. This is because
+ /// a ranged hash function converts an object directly to its
+ /// bucket index without ostensibly using a hash code.
+ /// We also put the key extraction and equality comparison function
+ /// objects here, for convenience.
+ ///
+ template <typename Key, typename Value, typename ExtractKey, typename Equal,
+ typename H1, typename H2, typename H, bool bCacheHashCode>
+ struct hash_code_base;
+
+
+ /// hash_code_base
+ ///
+ /// Specialization: ranged hash function, no caching hash codes.
+ /// H1 and H2 are provided but ignored. We define a dummy hash code type.
+ ///
+ template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2, typename H>
+ struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, false>
+ {
+ protected:
+ ExtractKey mExtractKey; // To do: Make this member go away entirely, as it never has any data.
+ Equal mEqual; // To do: Make this instance use zero space when it is zero size.
+ H mRangedHash; // To do: Make this instance use zero space when it is zero size
+
+ public:
+ H1 hash_function() const
+ { return H1(); }
+
+ Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
+ { return mEqual; } // has specified in its hashtable (unordered_*) proposal.
+
+ const Equal& key_eq() const
+ { return mEqual; }
+
+ Equal& key_eq()
+ { return mEqual; }
+
+ protected:
+ typedef void* hash_code_t;
+ typedef uint32_t bucket_index_t;
+
+ hash_code_base(const ExtractKey& extractKey, const Equal& eq, const H1&, const H2&, const H& h)
+ : mExtractKey(extractKey), mEqual(eq), mRangedHash(h) { }
+
+ hash_code_t get_hash_code(const Key& key) const
+ {
+ EA_UNUSED(key);
+ return NULL;
+ }
+
+ bucket_index_t bucket_index(hash_code_t, uint32_t) const
+ { return (bucket_index_t)0; }
+
+ bucket_index_t bucket_index(const Key& key, hash_code_t, uint32_t nBucketCount) const
+ { return (bucket_index_t)mRangedHash(key, nBucketCount); }
+
+ bucket_index_t bucket_index(const hash_node<Value, false>* pNode, uint32_t nBucketCount) const
+ { return (bucket_index_t)mRangedHash(mExtractKey(pNode->mValue), nBucketCount); }
+
+ bool compare(const Key& key, hash_code_t, hash_node<Value, false>* pNode) const
+ { return mEqual(key, mExtractKey(pNode->mValue)); }
+
+ void copy_code(hash_node<Value, false>*, const hash_node<Value, false>*) const
+ { } // Nothing to do.
+
+ void set_code(hash_node<Value, false>* pDest, hash_code_t c) const
+ {
+ EA_UNUSED(pDest);
+ EA_UNUSED(c);
+ }
+
+ void base_swap(hash_code_base& x)
+ {
+ eastl::swap(mExtractKey, x.mExtractKey);
+ eastl::swap(mEqual, x.mEqual);
+ eastl::swap(mRangedHash, x.mRangedHash);
+ }
+
+ }; // hash_code_base
+
+
+
+ // No specialization for ranged hash function while caching hash codes.
+ // That combination is meaningless, and trying to do it is an error.
+
+
+ /// hash_code_base
+ ///
+ /// Specialization: ranged hash function, cache hash codes.
+ /// This combination is meaningless, so we provide only a declaration
+ /// and no definition.
+ ///
+ template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2, typename H>
+ struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, true>;
+
+
+
+ /// hash_code_base
+ ///
+ /// Specialization: hash function and range-hashing function,
+ /// no caching of hash codes. H is provided but ignored.
+ /// Provides typedef and accessor required by TR1.
+ ///
+ template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2>
+ struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, default_ranged_hash, false>
+ {
+ protected:
+ ExtractKey mExtractKey;
+ Equal mEqual;
+ H1 m_h1;
+ H2 m_h2;
+
+ public:
+ typedef H1 hasher;
+
+ H1 hash_function() const
+ { return m_h1; }
+
+ Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
+ { return mEqual; } // has specified in its hashtable (unordered_*) proposal.
+
+ const Equal& key_eq() const
+ { return mEqual; }
+
+ Equal& key_eq()
+ { return mEqual; }
+
+ protected:
+ typedef size_t hash_code_t;
+ typedef uint32_t bucket_index_t;
+ typedef hash_node<Value, false> node_type;
+
+ hash_code_base(const ExtractKey& ex, const Equal& eq, const H1& h1, const H2& h2, const default_ranged_hash&)
+ : mExtractKey(ex), mEqual(eq), m_h1(h1), m_h2(h2) { }
+
+ hash_code_t get_hash_code(const Key& key) const
+ { return (hash_code_t)m_h1(key); }
+
+ bucket_index_t bucket_index(hash_code_t c, uint32_t nBucketCount) const
+ { return (bucket_index_t)m_h2(c, nBucketCount); }
+
+ bucket_index_t bucket_index(const Key&, hash_code_t c, uint32_t nBucketCount) const
+ { return (bucket_index_t)m_h2(c, nBucketCount); }
+
+ bucket_index_t bucket_index(const node_type* pNode, uint32_t nBucketCount) const
+ { return (bucket_index_t)m_h2((hash_code_t)m_h1(mExtractKey(pNode->mValue)), nBucketCount); }
+
+ bool compare(const Key& key, hash_code_t, node_type* pNode) const
+ { return mEqual(key, mExtractKey(pNode->mValue)); }
+
+ void copy_code(node_type*, const node_type*) const
+ { } // Nothing to do.
+
+ void set_code(node_type*, hash_code_t) const
+ { } // Nothing to do.
+
+ void base_swap(hash_code_base& x)
+ {
+ eastl::swap(mExtractKey, x.mExtractKey);
+ eastl::swap(mEqual, x.mEqual);
+ eastl::swap(m_h1, x.m_h1);
+ eastl::swap(m_h2, x.m_h2);
+ }
+
+ }; // hash_code_base
+
+
+
+ /// hash_code_base
+ ///
+ /// Specialization: hash function and range-hashing function,
+ /// caching hash codes. H is provided but ignored.
+ /// Provides typedef and accessor required by TR1.
+ ///
+ template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2>
+ struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, default_ranged_hash, true>
+ {
+ protected:
+ ExtractKey mExtractKey;
+ Equal mEqual;
+ H1 m_h1;
+ H2 m_h2;
+
+ public:
+ typedef H1 hasher;
+
+ H1 hash_function() const
+ { return m_h1; }
+
+ Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
+ { return mEqual; } // has specified in its hashtable (unordered_*) proposal.
+
+ const Equal& key_eq() const
+ { return mEqual; }
+
+ Equal& key_eq()
+ { return mEqual; }
+
+ protected:
+ typedef uint32_t hash_code_t;
+ typedef uint32_t bucket_index_t;
+ typedef hash_node<Value, true> node_type;
+
+ hash_code_base(const ExtractKey& ex, const Equal& eq, const H1& h1, const H2& h2, const default_ranged_hash&)
+ : mExtractKey(ex), mEqual(eq), m_h1(h1), m_h2(h2) { }
+
+ hash_code_t get_hash_code(const Key& key) const
+ { return (hash_code_t)m_h1(key); }
+
+ bucket_index_t bucket_index(hash_code_t c, uint32_t nBucketCount) const
+ { return (bucket_index_t)m_h2(c, nBucketCount); }
+
+ bucket_index_t bucket_index(const Key&, hash_code_t c, uint32_t nBucketCount) const
+ { return (bucket_index_t)m_h2(c, nBucketCount); }
+
+ bucket_index_t bucket_index(const node_type* pNode, uint32_t nBucketCount) const
+ { return (bucket_index_t)m_h2((uint32_t)pNode->mnHashCode, nBucketCount); }
+
+ bool compare(const Key& key, hash_code_t c, node_type* pNode) const
+ { return (pNode->mnHashCode == c) && mEqual(key, mExtractKey(pNode->mValue)); }
+
+ void copy_code(node_type* pDest, const node_type* pSource) const
+ { pDest->mnHashCode = pSource->mnHashCode; }
+
+ void set_code(node_type* pDest, hash_code_t c) const
+ { pDest->mnHashCode = c; }
+
+ void base_swap(hash_code_base& x)
+ {
+ eastl::swap(mExtractKey, x.mExtractKey);
+ eastl::swap(mEqual, x.mEqual);
+ eastl::swap(m_h1, x.m_h1);
+ eastl::swap(m_h2, x.m_h2);
+ }
+
+ }; // hash_code_base
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// hashtable
+ ///
+ /// Key and Value: arbitrary CopyConstructible types.
+ ///
+	/// ExtractKey: function object that takes an object of type Value
+ /// and returns a value of type Key.
+ ///
+	/// Equal: function object that takes two objects of type Key and returns
+ /// a bool-like value that is true if the two objects are considered equal.
+ ///
+ /// H1: a hash function. A unary function object with argument type
+ /// Key and result type size_t. Return values should be distributed
+ /// over the entire range [0, numeric_limits<uint32_t>::max()].
+ ///
+ /// H2: a range-hashing function (in the terminology of Tavori and
+ /// Dreizin). This is a function which takes the output of H1 and
+ /// converts it to the range of [0, n]. Usually it merely takes the
+	/// converts it to the range of [0, n). Usually it merely takes the
+ ///
+ /// H: a ranged hash function (Tavori and Dreizin). This is merely
+ /// a class that combines the functionality of H1 and H2 together,
+	/// possibly in some way that is somehow improved over H1 and H2.
+ /// It is a binary function whose argument types are Key and size_t
+ /// and whose result type is uint32_t. Given arguments k and n, the
+ /// return value is in the range [0, n). Default: h(k, n) = h2(h1(k), n).
+ /// If H is anything other than the default, H1 and H2 are ignored,
+ /// as H is thus overriding H1 and H2.
+ ///
+ /// RehashPolicy: Policy class with three members, all of which govern
+	/// the bucket count. GetNextBucketCount(n) returns a bucket count no smaller
+ /// than n. GetBucketCount(n) returns a bucket count appropriate
+ /// for an element count of n. GetRehashRequired(nBucketCount, nElementCount, nElementAdd)
+	/// determines whether, if the current bucket count is nBucketCount and the
+ /// current element count is nElementCount, we need to increase the bucket
+ /// count. If so, returns pair(true, n), where n is the new
+ /// bucket count. If not, returns pair(false, <anything>).
+ ///
+ /// Currently it is hard-wired that the number of buckets never
+ /// shrinks. Should we allow RehashPolicy to change that?
+ ///
+ /// bCacheHashCode: true if we store the value of the hash
+ /// function along with the value. This is a time-space tradeoff.
+ /// Storing it may improve lookup speed by reducing the number of
+ /// times we need to call the Equal function.
+ ///
+ /// bMutableIterators: true if hashtable::iterator is a mutable
+ /// iterator, false if iterator and const_iterator are both const
+ /// iterators. This is true for hash_map and hash_multimap,
+ /// false for hash_set and hash_multiset.
+ ///
+ /// bUniqueKeys: true if the return value of hashtable::count(k)
+ /// is always at most one, false if it may be an arbitrary number.
+ /// This is true for hash_set and hash_map and is false for
+ /// hash_multiset and hash_multimap.
+ ///
+ ///////////////////////////////////////////////////////////////////////
+ /// Note:
+ /// If you want to make a hashtable never increase its bucket usage,
+ /// call set_max_load_factor with a very high value such as 100000.f.
+ ///
+ /// find_as
+ /// In order to support the ability to have a hashtable of strings but
+ /// be able to do efficiently lookups via char pointers (i.e. so they
+	/// be able to do efficient lookups via char pointers (i.e. so they
+ /// function. This function allows you to do a find with a key of a
+ /// type other than the hashtable key type. See the find_as function
+ /// for more documentation on this.
+ ///
+ /// find_by_hash
+ /// In the interest of supporting fast operations wherever possible,
+ /// we provide a find_by_hash function which finds a node using its
+ /// hash code. This is useful for cases where the node's hash is
+ /// already known, allowing us to avoid a redundant hash operation
+ /// in the normal find path.
+ ///
+ template <typename Key, typename Value, typename Allocator, typename ExtractKey,
+ typename Equal, typename H1, typename H2, typename H,
+ typename RehashPolicy, bool bCacheHashCode, bool bMutableIterators, bool bUniqueKeys>
+ class hashtable
+ : public rehash_base<RehashPolicy, hashtable<Key, Value, Allocator, ExtractKey, Equal, H1, H2, H, RehashPolicy, bCacheHashCode, bMutableIterators, bUniqueKeys> >,
+ public hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, bCacheHashCode>
+ {
+ public:
+ typedef Key key_type;
+ typedef Value value_type;
+ typedef typename ExtractKey::result_type mapped_type;
+ typedef hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, bCacheHashCode> hash_code_base_type;
+ typedef typename hash_code_base_type::hash_code_t hash_code_t;
+ typedef Allocator allocator_type;
+ typedef Equal key_equal;
+ typedef ptrdiff_t difference_type;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef node_iterator<value_type, !bMutableIterators, bCacheHashCode> local_iterator;
+ typedef node_iterator<value_type, true, bCacheHashCode> const_local_iterator;
+ typedef hashtable_iterator<value_type, !bMutableIterators, bCacheHashCode> iterator;
+ typedef hashtable_iterator<value_type, true, bCacheHashCode> const_iterator;
+ typedef hash_node<value_type, bCacheHashCode> node_type;
+ typedef typename type_select<bUniqueKeys, eastl::pair<iterator, bool>, iterator>::type insert_return_type;
+ typedef hashtable<Key, Value, Allocator, ExtractKey, Equal, H1, H2, H,
+ RehashPolicy, bCacheHashCode, bMutableIterators, bUniqueKeys> this_type;
+ typedef RehashPolicy rehash_policy_type;
+ typedef ExtractKey extract_key_type;
+ typedef H1 h1_type;
+ typedef H2 h2_type;
+ typedef H h_type;
+ typedef integral_constant<bool, bUniqueKeys> has_unique_keys_type;
+
+ using hash_code_base_type::key_eq;
+ using hash_code_base_type::hash_function;
+ using hash_code_base_type::mExtractKey;
+ using hash_code_base_type::get_hash_code;
+ using hash_code_base_type::bucket_index;
+ using hash_code_base_type::compare;
+ using hash_code_base_type::set_code;
+ using hash_code_base_type::copy_code;
+
+ static const bool kCacheHashCode = bCacheHashCode;
+
+ enum
+ {
+ // This enumeration is deprecated in favor of eastl::kHashtableAllocFlagBuckets.
+ kAllocFlagBuckets = eastl::kHashtableAllocFlagBuckets // Flag to allocator which indicates that we are allocating buckets and not nodes.
+ };
+
+ protected:
+ node_type** mpBucketArray;
+ size_type mnBucketCount;
+ size_type mnElementCount;
+ RehashPolicy mRehashPolicy; // To do: Use base class optimization to make this go away.
+ allocator_type mAllocator; // To do: Use base class optimization to make this go away.
+
+ public:
+ hashtable(size_type nBucketCount, const H1&, const H2&, const H&, const Equal&, const ExtractKey&,
+ const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR);
+
+ template <typename FowardIterator>
+ hashtable(FowardIterator first, FowardIterator last, size_type nBucketCount,
+ const H1&, const H2&, const H&, const Equal&, const ExtractKey&,
+ const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR);
+
+ hashtable(const hashtable& x);
+
+ // initializer_list ctor support is implemented in subclasses (e.g. hash_set).
+ // hashtable(initializer_list<value_type>, size_type nBucketCount, const H1&, const H2&, const H&,
+ // const Equal&, const ExtractKey&, const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR);
+
+ hashtable(this_type&& x);
+ hashtable(this_type&& x, const allocator_type& allocator);
+ ~hashtable();
+
+ const allocator_type& get_allocator() const EA_NOEXCEPT;
+ allocator_type& get_allocator() EA_NOEXCEPT;
+ void set_allocator(const allocator_type& allocator);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ iterator begin() EA_NOEXCEPT
+ {
+ iterator i(mpBucketArray);
+ if(!i.mpNode)
+ i.increment_bucket();
+ return i;
+ }
+
+ const_iterator begin() const EA_NOEXCEPT
+ {
+ const_iterator i(mpBucketArray);
+ if(!i.mpNode)
+ i.increment_bucket();
+ return i;
+ }
+
+ const_iterator cbegin() const EA_NOEXCEPT
+ { return begin(); }
+
+ iterator end() EA_NOEXCEPT
+ { return iterator(mpBucketArray + mnBucketCount); }
+
+ const_iterator end() const EA_NOEXCEPT
+ { return const_iterator(mpBucketArray + mnBucketCount); }
+
+ const_iterator cend() const EA_NOEXCEPT
+ { return const_iterator(mpBucketArray + mnBucketCount); }
+
+ // Returns an iterator to the first item in bucket n.
+ local_iterator begin(size_type n) EA_NOEXCEPT
+ { return local_iterator(mpBucketArray[n]); }
+
+ const_local_iterator begin(size_type n) const EA_NOEXCEPT
+ { return const_local_iterator(mpBucketArray[n]); }
+
+ const_local_iterator cbegin(size_type n) const EA_NOEXCEPT
+ { return const_local_iterator(mpBucketArray[n]); }
+
+		// Returns an iterator marking the end of the bucket whose first item is returned by begin(n).
+ local_iterator end(size_type) EA_NOEXCEPT
+ { return local_iterator(NULL); }
+
+ const_local_iterator end(size_type) const EA_NOEXCEPT
+ { return const_local_iterator(NULL); }
+
+ const_local_iterator cend(size_type) const EA_NOEXCEPT
+ { return const_local_iterator(NULL); }
+
+ bool empty() const EA_NOEXCEPT
+ { return mnElementCount == 0; }
+
+ size_type size() const EA_NOEXCEPT
+ { return mnElementCount; }
+
+ size_type bucket_count() const EA_NOEXCEPT
+ { return mnBucketCount; }
+
+ size_type bucket_size(size_type n) const EA_NOEXCEPT
+ { return (size_type)eastl::distance(begin(n), end(n)); }
+
+ //size_type bucket(const key_type& k) const EA_NOEXCEPT
+ // { return bucket_index(k, (hash code here), (uint32_t)mnBucketCount); }
+
+ // Returns the ratio of element count to bucket count. A return value of 1 means
+ // there's an optimal 1 bucket for each element.
+ float load_factor() const EA_NOEXCEPT
+ { return (float)mnElementCount / (float)mnBucketCount; }
+
+ // Inherited from the base class.
+ // Returns the max load factor, which is the load factor beyond
+ // which we rebuild the container with a new bucket count.
+ // get_max_load_factor comes from rehash_base.
+ // float get_max_load_factor() const;
+
+ // Inherited from the base class.
+ // If you want to make the hashtable never rehash (resize),
+ // set the max load factor to be a very high number (e.g. 100000.f).
+ // set_max_load_factor comes from rehash_base.
+ // void set_max_load_factor(float fMaxLoadFactor);
+
+ /// Generalization of get_max_load_factor. This is an extension that's
+ /// not present in C++ hash tables (unordered containers).
+ const rehash_policy_type& rehash_policy() const EA_NOEXCEPT
+ { return mRehashPolicy; }
+
+ /// Generalization of set_max_load_factor. This is an extension that's
+ /// not present in C++ hash tables (unordered containers).
+ void rehash_policy(const rehash_policy_type& rehashPolicy);
+
+ template <class... Args>
+ insert_return_type emplace(Args&&... args);
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator position, Args&&... args);
+
+ template <class... Args> insert_return_type try_emplace(const key_type& k, Args&&... args);
+ template <class... Args> insert_return_type try_emplace(key_type&& k, Args&&... args);
+ template <class... Args> iterator try_emplace(const_iterator position, const key_type& k, Args&&... args);
+ template <class... Args> iterator try_emplace(const_iterator position, key_type&& k, Args&&... args);
+
+ insert_return_type insert(const value_type& value);
+ insert_return_type insert(value_type&& otherValue);
+ iterator insert(const_iterator hint, const value_type& value);
+ iterator insert(const_iterator hint, value_type&& value);
+ void insert(std::initializer_list<value_type> ilist);
+ template <typename InputIterator> void insert(InputIterator first, InputIterator last);
+ //insert_return_type insert(node_type&& nh);
+ //iterator insert(const_iterator hint, node_type&& nh);
+
+ // This overload attempts to mitigate the overhead associated with mismatched cv-quality elements of
+		// the hashtable pair. It can avoid copy overhead because it will perfectly forward the user-provided pair types
+		// until they can be constructed in-place in the allocated hashtable node.
+ //
+		// Ideally we would remove this overload, as it is deprecated and removed in C++17, but doing so currently causes
+		// performance regressions for hashtables with complex keys (keys that allocate resources).
+ template <class P,
+ class = typename eastl::enable_if_t<
+ #if EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR
+ !eastl::is_same_v<eastl::decay_t<P>, key_type> &&
+ #endif
+ !eastl::is_literal_type_v<P> &&
+ eastl::is_constructible_v<value_type, P&&>>>
+ insert_return_type insert(P&& otherValue);
+
+ // Non-standard extension
+ template <class P> // See comments below for the const value_type& equivalent to this function.
+ insert_return_type insert(hash_code_t c, node_type* pNodeNew, P&& otherValue);
+
+ // We provide a version of insert which lets the caller directly specify the hash value and
+ // a potential node to insert if needed. This allows for less thread contention in the case
+ // of a thread-shared hash table that's accessed during a mutex lock, because the hash calculation
+		// and node creation are done outside of the lock. If pNodeNew is supplied by the user (i.e. non-NULL)
+ // then it must be freeable via the hash table's allocator. If the return value is true then this function
+ // took over ownership of pNodeNew, else pNodeNew is still owned by the caller to free or to pass
+ // to another call to insert. pNodeNew need not be assigned the value by the caller, as the insert
+ // function will assign value to pNodeNew upon insertion into the hash table. pNodeNew may be
+ // created by the user with the allocate_uninitialized_node function, and freed by the free_uninitialized_node function.
+ insert_return_type insert(hash_code_t c, node_type* pNodeNew, const value_type& value);
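+
+		// Example usage of the hash-and-node insert protocol described above (an illustrative
+		// sketch for a unique-key table; 'table', 'key', 'value' and 'lock' are hypothetical):
+		//     hash_code_t c = table.hash_function()(key);           // hash outside the lock
+		//     node_type* pNodeNew = table.allocate_uninitialized_node();
+		//     lock.lock();
+		//     insert_return_type result = table.insert(c, pNodeNew, value);
+		//     lock.unlock();
+		//     if(!result.second)                                    // insert did not take ownership,
+		//         table.free_uninitialized_node(pNodeNew);          // so the caller must free the node.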
+
+ template <class M> eastl::pair<iterator, bool> insert_or_assign(const key_type& k, M&& obj);
+ template <class M> eastl::pair<iterator, bool> insert_or_assign(key_type&& k, M&& obj);
+ template <class M> iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj);
+ template <class M> iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj);
+
+ // Used to allocate and free memory used by insert(const value_type& value, hash_code_t c, node_type* pNodeNew).
+ node_type* allocate_uninitialized_node();
+ void free_uninitialized_node(node_type* pNode);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ size_type erase(const key_type& k);
+
+ void clear();
+ void clear(bool clearBuckets); // If clearBuckets is true, we free the bucket memory and set the bucket count back to the newly constructed count.
+ void reset_lose_memory() EA_NOEXCEPT; // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+ void rehash(size_type nBucketCount);
+ void reserve(size_type nElementCount);
+
+ iterator find(const key_type& key);
+ const_iterator find(const key_type& key) const;
+
+		/// Implements a find whereby the user supplies a search object of a different type
+		/// than the hashtable value_type, along with a matching hash and comparison predicate.
+		/// A useful case of this is when you have a container of string objects but want to
+		/// do searches by passing in char pointers. Without this kind of find, you would need
+		/// to do the expensive operation of converting the char pointer to a string just so it
+		/// could be used as the argument to the find function.
+ ///
+ /// Example usage (namespaces omitted for brevity):
+ /// hash_set<string> hashSet;
+ /// hashSet.find_as("hello"); // Use default hash and compare.
+ ///
+ /// Example usage (note that the predicate uses string as first type and char* as second):
+ /// hash_set<string> hashSet;
+ /// hashSet.find_as("hello", hash<char*>(), equal_to_2<string, char*>());
+ ///
+ template <typename U, typename UHash, typename BinaryPredicate>
+ iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate);
+
+ template <typename U, typename UHash, typename BinaryPredicate>
+ const_iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate) const;
+
+ template <typename U>
+ iterator find_as(const U& u);
+
+ template <typename U>
+ const_iterator find_as(const U& u) const;
+
+ // Note: find_by_hash and find_range_by_hash both perform a search based on a hash value.
+ // It is important to note that multiple hash values may map to the same hash bucket, so
+ // it would be incorrect to assume all items returned match the hash value that
+ // was searched for.
+
+ /// Implements a find whereby the user supplies the node's hash code.
+ /// It returns an iterator to the first element that matches the given hash. However, there may be multiple elements that match the given hash.
+
+ template<typename HashCodeT>
+ ENABLE_IF_HASHCODE_EASTLSIZET(HashCodeT, iterator) find_by_hash(HashCodeT c)
+ {
+ EASTL_CT_ASSERT_MSG(bCacheHashCode,
+ "find_by_hash(hash_code_t c) is designed to avoid recomputing hashes, "
+ "so it requires cached hash codes. Consider setting template parameter "
+ "bCacheHashCode to true or using find_by_hash(const key_type& k, hash_code_t c) instead.");
+
+ const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+
+ node_type* const pNode = DoFindNode(mpBucketArray[n], c);
+
+ return pNode ? iterator(pNode, mpBucketArray + n) :
+ iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+ template<typename HashCodeT>
+ ENABLE_IF_HASHCODE_EASTLSIZET(HashCodeT, const_iterator) find_by_hash(HashCodeT c) const
+ {
+ EASTL_CT_ASSERT_MSG(bCacheHashCode,
+ "find_by_hash(hash_code_t c) is designed to avoid recomputing hashes, "
+ "so it requires cached hash codes. Consider setting template parameter "
+ "bCacheHashCode to true or using find_by_hash(const key_type& k, hash_code_t c) instead.");
+
+ const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+
+ node_type* const pNode = DoFindNode(mpBucketArray[n], c);
+
+ return pNode ?
+ const_iterator(pNode, mpBucketArray + n) :
+ const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+ iterator find_by_hash(const key_type& k, hash_code_t c)
+ {
+ const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+
+ node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+ return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+ const_iterator find_by_hash(const key_type& k, hash_code_t c) const
+ {
+ const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+
+ node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+ return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
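+
+		// A minimal usage sketch (names are illustrative; cachedHash is assumed to have been produced
+		// by the same hash function the container itself uses):
+		//     hash_code_t cachedHash = eastl::hash<Key>()(key);
+		//     auto it = table.find_by_hash(key, cachedHash); // No hash recomputation inside the call.
+		//     if(it != table.end())
+		//         { /* use *it */ }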
+
+		// Returns a pair that allows iterating over all nodes in a hash bucket.
+		// The first element of the returned pair holds the iterator for the beginning of the bucket,
+		// and the second element holds the iterator for the end of the bucket.
+		// If no matching bucket is found, both iterators in the pair are set to end().
+ //
+ // See also the note above.
+ eastl::pair<iterator, iterator> find_range_by_hash(hash_code_t c);
+ eastl::pair<const_iterator, const_iterator> find_range_by_hash(hash_code_t c) const;
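+
+		// A minimal usage sketch (names are illustrative). Because several distinct hash values can land
+		// in the same bucket, each element in the returned range should be re-checked against the hash
+		// (or the key) actually being searched for:
+		//     auto range = table.find_range_by_hash(c);
+		//     for(auto it = range.first; it != range.second; ++it)
+		//     {
+		//         // Re-verify *it here before using it; not every node in the bucket necessarily hashes to c.
+		//     }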
+
+ size_type count(const key_type& k) const EA_NOEXCEPT;
+
+ eastl::pair<iterator, iterator> equal_range(const key_type& k);
+ eastl::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ protected:
+		// We must remove one of the 'DoGetResultIterator' overloads from the overload set (via SFINAE) because
+		// both cannot compile successfully at the same time. The 'bUniqueKeys' template parameter chooses at
+		// compile-time whether 'insert_return_type' is a pair<iterator, bool> or a raw iterator. Based on that
+		// class template parameter we pick between the two overloads: one unpacks the iterator from the pair,
+		// the other simply passes the provided iterator through to the caller.
+ template <typename BoolConstantT>
+ iterator DoGetResultIterator(BoolConstantT,
+ const insert_return_type& irt,
+ ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr) const EA_NOEXCEPT
+ {
+ return irt.first;
+ }
+
+ template <typename BoolConstantT>
+ iterator DoGetResultIterator(BoolConstantT,
+ const insert_return_type& irt,
+ DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr) const EA_NOEXCEPT
+ {
+ return irt;
+ }
+
+ node_type* DoAllocateNodeFromKey(const key_type& key);
+ node_type* DoAllocateNodeFromKey(key_type&& key);
+ void DoFreeNode(node_type* pNode);
+ void DoFreeNodes(node_type** pBucketArray, size_type);
+
+ node_type** DoAllocateBuckets(size_type n);
+ void DoFreeBuckets(node_type** pBucketArray, size_type n);
+
+ template <typename BoolConstantT, class... Args, ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr>
+ eastl::pair<iterator, bool> DoInsertValue(BoolConstantT, Args&&... args);
+
+ template <typename BoolConstantT, class... Args, DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr>
+ iterator DoInsertValue(BoolConstantT, Args&&... args);
+
+
+ template <typename BoolConstantT>
+ eastl::pair<iterator, bool> DoInsertValueExtra(BoolConstantT,
+ const key_type& k,
+ hash_code_t c,
+ node_type* pNodeNew,
+ value_type&& value,
+ ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <typename BoolConstantT>
+ eastl::pair<iterator, bool> DoInsertValue(BoolConstantT,
+ value_type&& value,
+ ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <typename BoolConstantT>
+ iterator DoInsertValueExtra(BoolConstantT,
+ const key_type& k,
+ hash_code_t c,
+ node_type* pNodeNew,
+ value_type&& value,
+ DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <typename BoolConstantT>
+ iterator DoInsertValue(BoolConstantT, value_type&& value, DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+
+ template <typename BoolConstantT>
+ eastl::pair<iterator, bool> DoInsertValueExtra(BoolConstantT,
+ const key_type& k,
+ hash_code_t c,
+ node_type* pNodeNew,
+ const value_type& value,
+ ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <typename BoolConstantT>
+ eastl::pair<iterator, bool> DoInsertValue(BoolConstantT,
+ const value_type& value,
+ ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <typename BoolConstantT>
+ iterator DoInsertValueExtra(BoolConstantT,
+ const key_type& k,
+ hash_code_t c,
+ node_type* pNodeNew,
+ const value_type& value,
+ DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <typename BoolConstantT>
+ iterator DoInsertValue(BoolConstantT, const value_type& value, DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr);
+
+ template <class... Args>
+ node_type* DoAllocateNode(Args&&... args);
+ node_type* DoAllocateNode(value_type&& value);
+ node_type* DoAllocateNode(const value_type& value);
+
+		// DoInsertKey is supposed to be given hash_code_t c = get_hash_code(key).
+		// This is done for the case where the application has its own hashset/hashmap-like container in which
+		// the hash code is, for some reason, already known prior to the insert. Passing the precomputed hash
+		// code saves some work, especially with heavy hash functions.
+ eastl::pair<iterator, bool> DoInsertKey(true_type, const key_type& key, hash_code_t c);
+ iterator DoInsertKey(false_type, const key_type& key, hash_code_t c);
+ eastl::pair<iterator, bool> DoInsertKey(true_type, key_type&& key, hash_code_t c);
+ iterator DoInsertKey(false_type, key_type&& key, hash_code_t c);
+
+		// We keep the DoInsertKey overloads without the third parameter for compatibility with older revisions
+		// of EASTL (3.12.07 and earlier), which called get_hash_code as the first step inside DoInsertKey.
+ eastl::pair<iterator, bool> DoInsertKey(true_type, const key_type& key) { return DoInsertKey(true_type(), key, get_hash_code(key)); }
+ iterator DoInsertKey(false_type, const key_type& key) { return DoInsertKey(false_type(), key, get_hash_code(key)); }
+ eastl::pair<iterator, bool> DoInsertKey(true_type, key_type&& key) { return DoInsertKey(true_type(), eastl::move(key), get_hash_code(key)); }
+ iterator DoInsertKey(false_type, key_type&& key) { return DoInsertKey(false_type(), eastl::move(key), get_hash_code(key)); }
+
+ void DoRehash(size_type nBucketCount);
+ node_type* DoFindNode(node_type* pNode, const key_type& k, hash_code_t c) const;
+
+ template <typename T>
+ ENABLE_IF_HAS_HASHCODE(T, node_type) DoFindNode(T* pNode, hash_code_t c) const
+ {
+ for (; pNode; pNode = pNode->mpNext)
+ {
+ if (pNode->mnHashCode == c)
+ return pNode;
+ }
+ return NULL;
+ }
+
+ template <typename U, typename BinaryPredicate>
+ node_type* DoFindNodeT(node_type* pNode, const U& u, BinaryPredicate predicate) const;
+
+ }; // class hashtable
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // node_iterator_base
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value, bool bCacheHashCode>
+ inline bool operator==(const node_iterator_base<Value, bCacheHashCode>& a, const node_iterator_base<Value, bCacheHashCode>& b)
+ { return a.mpNode == b.mpNode; }
+
+ template <typename Value, bool bCacheHashCode>
+ inline bool operator!=(const node_iterator_base<Value, bCacheHashCode>& a, const node_iterator_base<Value, bCacheHashCode>& b)
+ { return a.mpNode != b.mpNode; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // hashtable_iterator_base
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value, bool bCacheHashCode>
+ inline bool operator==(const hashtable_iterator_base<Value, bCacheHashCode>& a, const hashtable_iterator_base<Value, bCacheHashCode>& b)
+ { return a.mpNode == b.mpNode; }
+
+ template <typename Value, bool bCacheHashCode>
+ inline bool operator!=(const hashtable_iterator_base<Value, bCacheHashCode>& a, const hashtable_iterator_base<Value, bCacheHashCode>& b)
+ { return a.mpNode != b.mpNode; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // hashtable
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>
+ ::hashtable(size_type nBucketCount, const H1& h1, const H2& h2, const H& h,
+ const Eq& eq, const EK& ek, const allocator_type& allocator)
+ : rehash_base<RP, hashtable>(),
+ hash_code_base<K, V, EK, Eq, H1, H2, H, bC>(ek, eq, h1, h2, h),
+ mnBucketCount(0),
+ mnElementCount(0),
+ mRehashPolicy(),
+ mAllocator(allocator)
+ {
+ if(nBucketCount < 2) // If we are starting in an initially empty state, with no memory allocation done.
+ reset_lose_memory();
+ else // Else we are creating a potentially non-empty hashtable...
+ {
+ EASTL_ASSERT(nBucketCount < 10000000);
+ mnBucketCount = (size_type)mRehashPolicy.GetNextBucketCount((uint32_t)nBucketCount);
+ mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will always be at least 2.
+ }
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename FowardIterator>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::hashtable(FowardIterator first, FowardIterator last, size_type nBucketCount,
+ const H1& h1, const H2& h2, const H& h,
+ const Eq& eq, const EK& ek, const allocator_type& allocator)
+ : rehash_base<rehash_policy_type, hashtable>(),
+ hash_code_base<key_type, value_type, extract_key_type, key_equal, h1_type, h2_type, h_type, kCacheHashCode>(ek, eq, h1, h2, h),
+ //mnBucketCount(0), // This gets re-assigned below.
+ mnElementCount(0),
+ mRehashPolicy(),
+ mAllocator(allocator)
+ {
+ if(nBucketCount < 2)
+ {
+ const size_type nElementCount = (size_type)eastl::ht_distance(first, last);
+ mnBucketCount = (size_type)mRehashPolicy.GetBucketCount((uint32_t)nElementCount);
+ }
+ else
+ {
+ EASTL_ASSERT(nBucketCount < 10000000);
+ mnBucketCount = nBucketCount;
+ }
+
+ mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will always be at least 2.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(; first != last; ++first)
+ insert(*first);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ clear();
+ DoFreeBuckets(mpBucketArray, mnBucketCount);
+ throw;
+ }
+ #endif
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::hashtable(const this_type& x)
+ : rehash_base<RP, hashtable>(x),
+ hash_code_base<K, V, EK, Eq, H1, H2, H, bC>(x),
+ mnBucketCount(x.mnBucketCount),
+ mnElementCount(x.mnElementCount),
+ mRehashPolicy(x.mRehashPolicy),
+ mAllocator(x.mAllocator)
+ {
+ if(mnElementCount) // If there is anything to copy...
+ {
+ mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will be at least 2.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(size_type i = 0; i < x.mnBucketCount; ++i)
+ {
+ node_type* pNodeSource = x.mpBucketArray[i];
+ node_type** ppNodeDest = mpBucketArray + i;
+
+ while(pNodeSource)
+ {
+ *ppNodeDest = DoAllocateNode(pNodeSource->mValue);
+ copy_code(*ppNodeDest, pNodeSource);
+ ppNodeDest = &(*ppNodeDest)->mpNext;
+ pNodeSource = pNodeSource->mpNext;
+ }
+ }
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ clear();
+ DoFreeBuckets(mpBucketArray, mnBucketCount);
+ throw;
+ }
+ #endif
+ }
+ else
+ {
+			// In this case, instead of allocating memory and copying nothing from x,
+			// we reset ourselves to a zero-allocation state.
+ reset_lose_memory();
+ }
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::hashtable(this_type&& x)
+ : rehash_base<RP, hashtable>(x),
+ hash_code_base<K, V, EK, Eq, H1, H2, H, bC>(x),
+ mnBucketCount(0),
+ mnElementCount(0),
+ mRehashPolicy(x.mRehashPolicy),
+ mAllocator(x.mAllocator)
+ {
+ reset_lose_memory(); // We do this here the same as we do it in the default ctor because it puts the container in a proper initial empty state. This code would be cleaner if we could rely on being able to use C++11 delegating constructors and just call the default ctor here.
+ swap(x);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::hashtable(this_type&& x, const allocator_type& allocator)
+ : rehash_base<RP, hashtable>(x),
+ hash_code_base<K, V, EK, Eq, H1, H2, H, bC>(x),
+ mnBucketCount(0),
+ mnElementCount(0),
+ mRehashPolicy(x.mRehashPolicy),
+ mAllocator(allocator)
+ {
+ reset_lose_memory(); // We do this here the same as we do it in the default ctor because it puts the container in a proper initial empty state. This code would be cleaner if we could rely on being able to use C++11 delegating constructors and just call the default ctor here.
+ swap(x); // swap will directly or indirectly handle the possibility that mAllocator != x.mAllocator.
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline const typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::allocator_type&
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::get_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::allocator_type&
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::get_allocator() EA_NOEXCEPT
+ {
+ return mAllocator;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::set_allocator(const allocator_type& allocator)
+ {
+ mAllocator = allocator;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::this_type&
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::operator=(const this_type& x)
+ {
+ if(this != &x)
+ {
+ clear();
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ mAllocator = x.mAllocator;
+ #endif
+
+ insert(x.begin(), x.end());
+ }
+ return *this;
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::this_type&
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::operator=(this_type&& x)
+ {
+ if(this != &x)
+ {
+ clear(); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor.
+ swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy.
+ }
+ return *this;
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::this_type&
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::operator=(std::initializer_list<value_type> ilist)
+ {
+ // The simplest means of doing this is to clear and insert. There probably isn't a generic
+ // solution that's any more efficient without having prior knowledge of the ilist contents.
+ clear();
+ insert(ilist.begin(), ilist.end());
+ return *this;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::~hashtable()
+ {
+ clear();
+ DoFreeBuckets(mpBucketArray, mnBucketCount);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNodeFromKey(const key_type& key)
+ {
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(pair_first_construct, key);
+ pNode->mpNext = NULL;
+ return pNode;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNodeFromKey(key_type&& key)
+ {
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(pair_first_construct, eastl::move(key));
+ pNode->mpNext = NULL;
+ return pNode;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFreeNode(node_type* pNode)
+ {
+ pNode->~node_type();
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFreeNodes(node_type** pNodeArray, size_type n)
+ {
+ for(size_type i = 0; i < n; ++i)
+ {
+ node_type* pNode = pNodeArray[i];
+ while(pNode)
+ {
+ node_type* const pTempNode = pNode;
+ pNode = pNode->mpNext;
+ DoFreeNode(pTempNode);
+ }
+ pNodeArray[i] = NULL;
+ }
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type**
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateBuckets(size_type n)
+ {
+ // We allocate one extra bucket to hold a sentinel, an arbitrary
+ // non-null pointer. Iterator increment relies on this.
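+		// The non-null sentinel matters because the iterator's bucket advance is roughly of the
+		// following form (a sketch of the idea, not the exact implementation used elsewhere in this file):
+		//     ++mpBucketArray;
+		//     while(*mpBucketArray == NULL) // Terminates at the non-null sentinel, i.e. at end().
+		//         ++mpBucketArray;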
+ EASTL_ASSERT(n > 1); // We reserve an mnBucketCount of 1 for the shared gpEmptyBucketArray.
+ EASTL_CT_ASSERT(kHashtableAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the allocator has a copy of this enum.
+ node_type** const pBucketArray = (node_type**)EASTLAllocAlignedFlags(mAllocator, (n + 1) * sizeof(node_type*), EASTL_ALIGN_OF(node_type*), 0, kHashtableAllocFlagBuckets);
+ //eastl::fill(pBucketArray, pBucketArray + n, (node_type*)NULL);
+ memset(pBucketArray, 0, n * sizeof(node_type*));
+ pBucketArray[n] = reinterpret_cast<node_type*>((uintptr_t)~0);
+ return pBucketArray;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFreeBuckets(node_type** pBucketArray, size_type n)
+ {
+		// If n <= 1, then pBucketArray is from the shared gpEmptyBucketArray. We don't test
+		// for pBucketArray == &gpEmptyBucketArray because one library may have a different gpEmptyBucketArray
+		// than another and yet pass a hashtable between them. So we go by the size instead.
+ if(n > 1)
+			EASTLFree(mAllocator, pBucketArray, (n + 1) * sizeof(node_type*)); // '+1' because DoAllocateBuckets allocates nBucketCount + 1 buckets in order to have a non-null sentinel at the end.
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::swap(this_type& x)
+ {
+ hash_code_base<K, V, EK, Eq, H1, H2, H, bC>::base_swap(x); // hash_code_base has multiple implementations, so we let them handle the swap.
+ eastl::swap(mRehashPolicy, x.mRehashPolicy);
+ EASTL_MACRO_SWAP(node_type**, mpBucketArray, x.mpBucketArray);
+ eastl::swap(mnBucketCount, x.mnBucketCount);
+ eastl::swap(mnElementCount, x.mnElementCount);
+
+ if (mAllocator != x.mAllocator) // If allocators are not equivalent...
+ {
+ eastl::swap(mAllocator, x.mAllocator);
+ }
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::rehash_policy(const rehash_policy_type& rehashPolicy)
+ {
+ mRehashPolicy = rehashPolicy;
+
+ const size_type nBuckets = rehashPolicy.GetBucketCount((uint32_t)mnElementCount);
+
+ if(nBuckets > mnBucketCount)
+ DoRehash(nBuckets);
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find(const key_type& k)
+ {
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+
+ node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+ return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find(const key_type& k) const
+ {
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+
+ node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+ return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename U, typename UHash, typename BinaryPredicate>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other, UHash uhash, BinaryPredicate predicate)
+ {
+ const hash_code_t c = (hash_code_t)uhash(other);
+ const size_type n = (size_type)(c % mnBucketCount); // This assumes we are using the mod range policy.
+
+ node_type* const pNode = DoFindNodeT(mpBucketArray[n], other, predicate);
+ return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename U, typename UHash, typename BinaryPredicate>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other, UHash uhash, BinaryPredicate predicate) const
+ {
+ const hash_code_t c = (hash_code_t)uhash(other);
+ const size_type n = (size_type)(c % mnBucketCount); // This assumes we are using the mod range policy.
+
+ node_type* const pNode = DoFindNodeT(mpBucketArray[n], other, predicate);
+ return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+ }
+
+
+ /// hashtable_find
+ ///
+ /// Helper function that defaults to using hash<U> and equal_to_2<T, U>.
+ /// This makes it so that by default you don't need to provide these.
+ /// Note that the default hash functions may not be what you want, though.
+ ///
+ /// Example usage. Instead of this:
+ /// hash_set<string> hashSet;
+ /// hashSet.find("hello", hash<char*>(), equal_to_2<string, char*>());
+ ///
+ /// You can use this:
+ /// hash_set<string> hashSet;
+ /// hashtable_find(hashSet, "hello");
+ ///
+ template <typename H, typename U>
+ inline typename H::iterator hashtable_find(H& hashTable, U u)
+ { return hashTable.find_as(u, eastl::hash<U>(), eastl::equal_to_2<const typename H::key_type, U>()); }
+
+ template <typename H, typename U>
+ inline typename H::const_iterator hashtable_find(const H& hashTable, U u)
+ { return hashTable.find_as(u, eastl::hash<U>(), eastl::equal_to_2<const typename H::key_type, U>()); }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename U>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other)
+ { return eastl::hashtable_find(*this, other); }
+ // VC++ doesn't appear to like the following, though it seems correct to me.
+ // So we implement the workaround above until we can straighten this out.
+ //{ return find_as(other, eastl::hash<U>(), eastl::equal_to_2<const key_type, U>()); }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename U>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other) const
+ { return eastl::hashtable_find(*this, other); }
+ // VC++ doesn't appear to like the following, though it seems correct to me.
+ // So we implement the workaround above until we can straighten this out.
+ //{ return find_as(other, eastl::hash<U>(), eastl::equal_to_2<const key_type, U>()); }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator,
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_range_by_hash(hash_code_t c) const
+ {
+ const size_type start = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+ node_type* const pNodeStart = mpBucketArray[start];
+
+ if (pNodeStart)
+ {
+ eastl::pair<const_iterator, const_iterator> pair(const_iterator(pNodeStart, mpBucketArray + start),
+ const_iterator(pNodeStart, mpBucketArray + start));
+ pair.second.increment_bucket();
+ return pair;
+ }
+
+ return eastl::pair<const_iterator, const_iterator>(const_iterator(mpBucketArray + mnBucketCount),
+ const_iterator(mpBucketArray + mnBucketCount));
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator,
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_range_by_hash(hash_code_t c)
+ {
+ const size_type start = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+ node_type* const pNodeStart = mpBucketArray[start];
+
+ if (pNodeStart)
+ {
+ eastl::pair<iterator, iterator> pair(iterator(pNodeStart, mpBucketArray + start),
+ iterator(pNodeStart, mpBucketArray + start));
+ pair.second.increment_bucket();
+ return pair;
+
+ }
+
+ return eastl::pair<iterator, iterator>(iterator(mpBucketArray + mnBucketCount),
+ iterator(mpBucketArray + mnBucketCount));
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::size_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::count(const key_type& k) const EA_NOEXCEPT
+ {
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+ size_type result = 0;
+
+ // To do: Make a specialization for bU (unique keys) == true and take
+ // advantage of the fact that the count will always be zero or one in that case.
+ for(node_type* pNode = mpBucketArray[n]; pNode; pNode = pNode->mpNext)
+ {
+ if(compare(k, c, pNode))
+ ++result;
+ }
+ return result;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator,
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::equal_range(const key_type& k)
+ {
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+ node_type** head = mpBucketArray + n;
+ node_type* pNode = DoFindNode(*head, k, c);
+
+ if(pNode)
+ {
+ node_type* p1 = pNode->mpNext;
+
+ for(; p1; p1 = p1->mpNext)
+ {
+ if(!compare(k, c, p1))
+ break;
+ }
+
+ iterator first(pNode, head);
+ iterator last(p1, head);
+
+ if(!p1)
+ last.increment_bucket();
+
+ return eastl::pair<iterator, iterator>(first, last);
+ }
+
+ return eastl::pair<iterator, iterator>(iterator(mpBucketArray + mnBucketCount), // iterator(mpBucketArray + mnBucketCount) == end()
+ iterator(mpBucketArray + mnBucketCount));
+ }
+
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator,
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::equal_range(const key_type& k) const
+ {
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+ node_type** head = mpBucketArray + n;
+ node_type* pNode = DoFindNode(*head, k, c);
+
+ if(pNode)
+ {
+ node_type* p1 = pNode->mpNext;
+
+ for(; p1; p1 = p1->mpNext)
+ {
+ if(!compare(k, c, p1))
+ break;
+ }
+
+ const_iterator first(pNode, head);
+ const_iterator last(p1, head);
+
+ if(!p1)
+ last.increment_bucket();
+
+ return eastl::pair<const_iterator, const_iterator>(first, last);
+ }
+
+ return eastl::pair<const_iterator, const_iterator>(const_iterator(mpBucketArray + mnBucketCount), // iterator(mpBucketArray + mnBucketCount) == end()
+ const_iterator(mpBucketArray + mnBucketCount));
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFindNode(node_type* pNode, const key_type& k, hash_code_t c) const
+ {
+ for(; pNode; pNode = pNode->mpNext)
+ {
+ if(compare(k, c, pNode))
+ return pNode;
+ }
+ return NULL;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename U, typename BinaryPredicate>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFindNodeT(node_type* pNode, const U& other, BinaryPredicate predicate) const
+ {
+ for(; pNode; pNode = pNode->mpNext)
+ {
+ if(predicate(mExtractKey(pNode->mValue), other)) // Intentionally compare with key as first arg and other as second arg.
+ return pNode;
+ }
+ return NULL;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename BoolConstantT, class... Args, ENABLE_IF_TRUETYPE(BoolConstantT)>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(BoolConstantT, Args&&... args) // true_type means bUniqueKeys is true.
+ {
+ // Adds the value to the hash table if not already present.
+ // If already present then the existing value is returned via an iterator/bool pair.
+
+ // We have a chicken-and-egg problem here. In order to know if and where to insert the value, we need to get the
+ // hashtable key for the value. But we don't explicitly have a value argument, we have a templated Args&&... argument.
+ // We need the value_type in order to proceed, but that entails getting an instance of a value_type from the args.
+ // And it may turn out that the value is already present in the hashtable and we need to cancel the insertion,
+ // despite having obtained a value_type to put into the hashtable. We have mitigated this problem somewhat by providing
+ // specializations of the insert function for const value_type& and value_type&&, and so the only time this function
+ // should get called is when args refers to arguments to construct a value_type.
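+			//
+			// For example (a sketch; the map, key and Widget types are illustrative, not part of this header):
+			//     eastl::hash_map<eastl::string, Widget> map;
+			//     map.emplace("key", Widget()); // If "key" is already present, the node constructed here
+			//                                   // from the arguments is immediately freed again below.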
+
+ node_type* const pNodeNew = DoAllocateNode(eastl::forward<Args>(args)...);
+ const key_type& k = mExtractKey(pNodeNew->mValue);
+ const hash_code_t c = get_hash_code(k);
+ size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+ node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+
+ if(pNode == NULL) // If value is not present... add it.
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ if(bRehash.first)
+ {
+ n = (size_type)bucket_index(k, c, (uint32_t)bRehash.second);
+ DoRehash(bRehash.second);
+ }
+
+ EASTL_ASSERT((uintptr_t)mpBucketArray != (uintptr_t)&gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ ++mnElementCount;
+
+ return eastl::pair<iterator, bool>(iterator(pNodeNew, mpBucketArray + n), true);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeNode(pNodeNew);
+ throw;
+ }
+ #endif
+ }
+ else
+ {
+ // To do: We have an inefficiency to deal with here. We allocated a node above but we are freeing it here because
+ // it turned out it wasn't needed. But we needed to create the node in order to get the hashtable key for
+ // the node. One possible resolution is to create specializations: DoInsertValue(true_type, value_type&&) and
+ // DoInsertValue(true_type, const value_type&) which don't need to create a node up front in order to get the
+ // hashtable key. Probably most users would end up using these pathways instead of this Args... pathway.
+			// While we should consider handling this to-do item, most of the performance limitations of maps and sets
+			// in practice are with finding elements rather than adding (potentially redundant) new elements.
+ DoFreeNode(pNodeNew);
+ }
+
+ return eastl::pair<iterator, bool>(iterator(pNode, mpBucketArray + n), false);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename BoolConstantT, class... Args, DISABLE_IF_TRUETYPE(BoolConstantT)>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(BoolConstantT, Args&&... args) // false_type means bUniqueKeys is false.
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ if(bRehash.first)
+ DoRehash(bRehash.second);
+
+ node_type* pNodeNew = DoAllocateNode(eastl::forward<Args>(args)...);
+ const key_type& k = mExtractKey(pNodeNew->mValue);
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ // To consider: Possibly make this insertion not make equal elements contiguous.
+ // As it stands now, we insert equal values contiguously in the hashtable.
+ // The benefit is that equal_range can work in a sensible manner and that
+ // erase(value) can more quickly find equal values. The downside is that
+		// this insertion operation takes some extra time. How important is it to
+ // us that equal_range span all equal items?
+ node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c);
+
+ if(pNodePrev == NULL)
+ {
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ }
+ else
+ {
+ pNodeNew->mpNext = pNodePrev->mpNext;
+ pNodePrev->mpNext = pNodeNew;
+ }
+
+ ++mnElementCount;
+
+ return iterator(pNodeNew, mpBucketArray + n);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class... Args>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNode(Args&&... args)
+ {
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(eastl::forward<Args>(args)...);
+ pNode->mpNext = NULL;
+ return pNode;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ throw;
+ }
+ #endif
+ }
+
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+ // Note: The following insertion-related functions are nearly copies of the above three functions,
+ // but are for value_type&& and const value_type& arguments. It's useful for us to have the functions
+ // below, even when using a fully compliant C++11 compiler that supports the above functions.
+	// The reason is that the specializations below are slightly more efficient: they can delay
+	// the creation of a node until it is known that one will be needed.
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
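+	//
+	// Illustration of the benefit described above (a sketch; the hash_set instantiation is illustrative):
+	//     eastl::hash_set<int> intSet;
+	//     intSet.insert(7); // Allocates a node and links it in.
+	//     intSet.insert(7); // const value_type& path: the duplicate is detected before any node
+	//                       // allocation occurs, unlike the variadic Args&&... pathway above.
+	////////////////////////////////////////////////////////////////////////////////////////////////////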
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename BoolConstantT>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValueExtra(BoolConstantT, const key_type& k,
+ hash_code_t c, node_type* pNodeNew, value_type&& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true.
+ {
+ // Adds the value to the hash table if not already present.
+ // If already present then the existing value is returned via an iterator/bool pair.
+ size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+ node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+
+ if(pNode == NULL) // If value is not present... add it.
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ // Allocate the new node before doing the rehash so that we don't
+ // do a rehash if the allocation throws.
+ #if EASTL_EXCEPTIONS_ENABLED
+				bool nodeAllocated; // If exceptions are enabled then we need to track whether we allocated the node so we can free it in the catch block.
+ #endif
+
+ if(pNodeNew)
+ {
+ ::new(eastl::addressof(pNodeNew->mValue)) value_type(eastl::move(value)); // It's expected that pNodeNew was allocated with allocate_uninitialized_node.
+ #if EASTL_EXCEPTIONS_ENABLED
+ nodeAllocated = false;
+ #endif
+ }
+ else
+ {
+ pNodeNew = DoAllocateNode(eastl::move(value));
+ #if EASTL_EXCEPTIONS_ENABLED
+ nodeAllocated = true;
+ #endif
+ }
+
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ if(bRehash.first)
+ {
+ n = (size_type)bucket_index(k, c, (uint32_t)bRehash.second);
+ DoRehash(bRehash.second);
+ }
+
+ EASTL_ASSERT((uintptr_t)mpBucketArray != (uintptr_t)&gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ ++mnElementCount;
+
+ return eastl::pair<iterator, bool>(iterator(pNodeNew, mpBucketArray + n), true);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ if(nodeAllocated) // If we allocated the node within this function, free it. Else let the caller retain ownership of it.
+ DoFreeNode(pNodeNew);
+ throw;
+ }
+ #endif
+ }
+ // Else the value is already present, so don't add a new node. And don't free pNodeNew.
+
+ return eastl::pair<iterator, bool>(iterator(pNode, mpBucketArray + n), false);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename BoolConstantT>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(BoolConstantT, value_type&& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true.
+ {
+ const key_type& k = mExtractKey(value);
+ const hash_code_t c = get_hash_code(k);
+
+ return DoInsertValueExtra(true_type(), k, c, NULL, eastl::move(value));
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename BoolConstantT>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValueExtra(BoolConstantT, const key_type& k, hash_code_t c, node_type* pNodeNew, value_type&& value,
+ DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false.
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ if(bRehash.first)
+ DoRehash(bRehash.second); // Note: We don't need to wrap this call with try/catch because there's nothing we would need to do in the catch.
+
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+
+ if(pNodeNew)
+ ::new(eastl::addressof(pNodeNew->mValue)) value_type(eastl::move(value)); // It's expected that pNodeNew was allocated with allocate_uninitialized_node.
+ else
+ pNodeNew = DoAllocateNode(eastl::move(value));
+
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ // To consider: Possibly make this insertion not make equal elements contiguous.
+ // As it stands now, we insert equal values contiguously in the hashtable.
+ // The benefit is that equal_range can work in a sensible manner and that
+ // erase(value) can more quickly find equal values. The downside is that
+		// this insertion operation takes some extra time. How important is it to
+ // us that equal_range span all equal items?
+ node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c);
+
+ if(pNodePrev == NULL)
+ {
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ }
+ else
+ {
+ pNodeNew->mpNext = pNodePrev->mpNext;
+ pNodePrev->mpNext = pNodeNew;
+ }
+
+ ++mnElementCount;
+
+ return iterator(pNodeNew, mpBucketArray + n);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template<typename BoolConstantT>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(BoolConstantT, value_type&& value, DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false.
+ {
+ const key_type& k = mExtractKey(value);
+ const hash_code_t c = get_hash_code(k);
+
+ return DoInsertValueExtra(false_type(), k, c, NULL, eastl::move(value));
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNode(value_type&& value)
+ {
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(eastl::move(value));
+ pNode->mpNext = NULL;
+ return pNode;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template<typename BoolConstantT>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValueExtra(BoolConstantT, const key_type& k, hash_code_t c, node_type* pNodeNew, const value_type& value,
+ ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true.
+ {
+ // Adds the value to the hash table if not already present.
+ // If already present then the existing value is returned via an iterator/bool pair.
+ size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+ node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+
+ if(pNode == NULL) // If value is not present... add it.
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ // Allocate the new node before doing the rehash so that we don't
+ // do a rehash if the allocation throws.
+ #if EASTL_EXCEPTIONS_ENABLED
+				bool nodeAllocated; // If exceptions are enabled then we need to track whether we allocated the node so we can free it in the catch block.
+ #endif
+
+ if(pNodeNew)
+ {
+ ::new(eastl::addressof(pNodeNew->mValue)) value_type(value); // It's expected that pNodeNew was allocated with allocate_uninitialized_node.
+ #if EASTL_EXCEPTIONS_ENABLED
+ nodeAllocated = false;
+ #endif
+ }
+ else
+ {
+ pNodeNew = DoAllocateNode(value);
+ #if EASTL_EXCEPTIONS_ENABLED
+ nodeAllocated = true;
+ #endif
+ }
+
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ if(bRehash.first)
+ {
+ n = (size_type)bucket_index(k, c, (uint32_t)bRehash.second);
+ DoRehash(bRehash.second);
+ }
+
+ EASTL_ASSERT((uintptr_t)mpBucketArray != (uintptr_t)&gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ ++mnElementCount;
+
+ return eastl::pair<iterator, bool>(iterator(pNodeNew, mpBucketArray + n), true);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ if(nodeAllocated) // If we allocated the node within this function, free it. Else let the caller retain ownership of it.
+ DoFreeNode(pNodeNew);
+ throw;
+ }
+ #endif
+ }
+ // Else the value is already present, so don't add a new node. And don't free pNodeNew.
+
+ return eastl::pair<iterator, bool>(iterator(pNode, mpBucketArray + n), false);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template<typename BoolConstantT>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(BoolConstantT, const value_type& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true.
+ {
+ const key_type& k = mExtractKey(value);
+ const hash_code_t c = get_hash_code(k);
+
+ return DoInsertValueExtra(true_type(), k, c, NULL, value);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename BoolConstantT>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValueExtra(BoolConstantT, const key_type& k, hash_code_t c, node_type* pNodeNew, const value_type& value,
+ DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false.
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ if(bRehash.first)
+ DoRehash(bRehash.second); // Note: We don't need to wrap this call with try/catch because there's nothing we would need to do in the catch.
+
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+
+ if(pNodeNew)
+ ::new(eastl::addressof(pNodeNew->mValue)) value_type(value); // It's expected that pNodeNew was allocated with allocate_uninitialized_node.
+ else
+ pNodeNew = DoAllocateNode(value);
+
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ // To consider: Possibly make this insertion not make equal elements contiguous.
+ // As it stands now, we insert equal values contiguously in the hashtable.
+ // The benefit is that equal_range can work in a sensible manner and that
+ // erase(value) can more quickly find equal values. The downside is that
+		// this insertion operation takes some extra time. How important is it to
+ // us that equal_range span all equal items?
+ node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c);
+
+ if(pNodePrev == NULL)
+ {
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ }
+ else
+ {
+ pNodeNew->mpNext = pNodePrev->mpNext;
+ pNodePrev->mpNext = pNodeNew;
+ }
+
+ ++mnElementCount;
+
+ return iterator(pNodeNew, mpBucketArray + n);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template<typename BoolConstantT>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(BoolConstantT, const value_type& value, DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false.
+ {
+ const key_type& k = mExtractKey(value);
+ const hash_code_t c = get_hash_code(k);
+
+ return DoInsertValueExtra(false_type(), k, c, NULL, value);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNode(const value_type& value)
+ {
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(value);
+ pNode->mpNext = NULL;
+ return pNode;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::allocate_uninitialized_node()
+ {
+ // We don't wrap this in try/catch because users of this function are expected to do that themselves as needed.
+ node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+ // Leave pNode->mValue uninitialized.
+ pNode->mpNext = NULL;
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::free_uninitialized_node(node_type* pNode)
+ {
+ // pNode->mValue is expected to be uninitialized.
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ }
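+
+ // Example usage of the uninitialized-node extension above (an illustrative sketch; 'hashTable',
+ // 'c' and 'value' are hypothetical names). A node may be pre-allocated and later either consumed
+ // by the insert overload that accepts a node pointer, or released without ever constructing a value:
+ //     node_type* pNodeNew = hashTable.allocate_uninitialized_node();
+ //     ... // compute a hash code 'c' for the value's key, using the table's hash function
+ //     hashTable.insert(c, pNodeNew, value);        // constructs 'value' inside pNodeNew and links it in
+ //     // or, if the insertion is abandoned before use:
+ //     hashTable.free_uninitialized_node(pNodeNew);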
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertKey(true_type, const key_type& key, const hash_code_t c) // true_type means bUniqueKeys is true.
+ {
+ size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount);
+ node_type* const pNode = DoFindNode(mpBucketArray[n], key, c);
+
+ if(pNode == NULL)
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ // Allocate the new node before doing the rehash so that we don't
+ // do a rehash if the allocation throws.
+ node_type* const pNodeNew = DoAllocateNodeFromKey(key);
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ if(bRehash.first)
+ {
+ n = (size_type)bucket_index(key, c, (uint32_t)bRehash.second);
+ DoRehash(bRehash.second);
+ }
+
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ ++mnElementCount;
+
+ return eastl::pair<iterator, bool>(iterator(pNodeNew, mpBucketArray + n), true);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeNode(pNodeNew);
+ throw;
+ }
+ #endif
+ }
+
+ return eastl::pair<iterator, bool>(iterator(pNode, mpBucketArray + n), false);
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertKey(false_type, const key_type& key, const hash_code_t c) // false_type means bUniqueKeys is false.
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ if(bRehash.first)
+ DoRehash(bRehash.second);
+
+ const size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount);
+
+ node_type* const pNodeNew = DoAllocateNodeFromKey(key);
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ // To consider: Possibly make this insertion not make equal elements contiguous.
+ // As it stands now, we insert equal values contiguously in the hashtable.
+ // The benefit is that equal_range can work in a sensible manner and that
+ // erase(value) can more quickly find equal values. The downside is that
+ // this insertion operation takes some extra time. How important is it to
+ // us that equal_range span all equal items?
+ node_type* const pNodePrev = DoFindNode(mpBucketArray[n], key, c);
+
+ if(pNodePrev == NULL)
+ {
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ }
+ else
+ {
+ pNodeNew->mpNext = pNodePrev->mpNext;
+ pNodePrev->mpNext = pNodeNew;
+ }
+
+ ++mnElementCount;
+
+ return iterator(pNodeNew, mpBucketArray + n);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertKey(true_type, key_type&& key, const hash_code_t c) // true_type means bUniqueKeys is true.
+ {
+ size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount);
+ node_type* const pNode = DoFindNode(mpBucketArray[n], key, c);
+
+ if(pNode == NULL)
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ // Allocate the new node before doing the rehash so that we don't
+ // do a rehash if the allocation throws.
+ node_type* const pNodeNew = DoAllocateNodeFromKey(eastl::move(key));
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ if(bRehash.first)
+ {
+ n = (size_type)bucket_index(key, c, (uint32_t)bRehash.second);
+ DoRehash(bRehash.second);
+ }
+
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ ++mnElementCount;
+
+ return eastl::pair<iterator, bool>(iterator(pNodeNew, mpBucketArray + n), true);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeNode(pNodeNew);
+ throw;
+ }
+ #endif
+ }
+
+ return eastl::pair<iterator, bool>(iterator(pNode, mpBucketArray + n), false);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertKey(false_type, key_type&& key, const hash_code_t c) // false_type means bUniqueKeys is false.
+ {
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+ if(bRehash.first)
+ DoRehash(bRehash.second);
+
+ const size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount);
+
+ node_type* const pNodeNew = DoAllocateNodeFromKey(eastl::move(key));
+ set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+ // To consider: Possibly make this insertion not make equal elements contiguous.
+ // As it stands now, we insert equal values contiguously in the hashtable.
+ // The benefit is that equal_range can work in a sensible manner and that
+ // erase(value) can more quickly find equal values. The downside is that
+ // this insertion operation takes some extra time. How important is it to
+ // us that equal_range span all equal items?
+ node_type* const pNodePrev = DoFindNode(mpBucketArray[n], key, c);
+
+ if(pNodePrev == NULL)
+ {
+ EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+ pNodeNew->mpNext = mpBucketArray[n];
+ mpBucketArray[n] = pNodeNew;
+ }
+ else
+ {
+ pNodeNew->mpNext = pNodePrev->mpNext;
+ pNodePrev->mpNext = pNodeNew;
+ }
+
+ ++mnElementCount;
+
+ return iterator(pNodeNew, mpBucketArray + n);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class... Args>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::emplace(Args&&... args)
+ {
+ return DoInsertValue(has_unique_keys_type(), eastl::forward<Args>(args)...); // Need to use forward instead of move because Args&& is a "universal reference" instead of an rvalue reference.
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class... Args>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::emplace_hint(const_iterator, Args&&... args)
+ {
+ // We currently ignore the iterator argument as a hint.
+ insert_return_type result = DoInsertValue(has_unique_keys_type(), eastl::forward<Args>(args)...);
+ return DoGetResultIterator(has_unique_keys_type(), result);
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class... Args>
+ // inline eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::try_emplace(const key_type& key, Args&&... args)
+ {
+ return DoInsertValue(has_unique_keys_type(), piecewise_construct, eastl::forward_as_tuple(key),
+ eastl::forward_as_tuple(eastl::forward<Args>(args)...));
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class... Args>
+ // inline eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::try_emplace(key_type&& key, Args&&... args)
+ {
+ return DoInsertValue(has_unique_keys_type(), piecewise_construct, eastl::forward_as_tuple(eastl::move(key)),
+ eastl::forward_as_tuple(eastl::forward<Args>(args)...));
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class... Args>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::try_emplace(const_iterator, const key_type& key, Args&&... args)
+ {
+ insert_return_type result = DoInsertValue(
+ has_unique_keys_type(),
+ value_type(piecewise_construct, eastl::forward_as_tuple(key), eastl::forward_as_tuple(eastl::forward<Args>(args)...)));
+
+ return DoGetResultIterator(has_unique_keys_type(), result);
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class... Args>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::try_emplace(const_iterator, key_type&& key, Args&&... args)
+ {
+ insert_return_type result =
+ DoInsertValue(has_unique_keys_type(), value_type(piecewise_construct, eastl::forward_as_tuple(eastl::move(key)),
+ eastl::forward_as_tuple(eastl::forward<Args>(args)...)));
+
+ return DoGetResultIterator(has_unique_keys_type(), result);
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(value_type&& otherValue)
+ {
+ return DoInsertValue(has_unique_keys_type(), eastl::move(otherValue));
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class P>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(hash_code_t c, node_type* pNodeNew, P&& otherValue)
+ {
+ // pNodeNew->mValue is expected to be uninitialized.
+ value_type value(eastl::forward<P>(otherValue)); // Need to use forward instead of move because P&& is a "universal reference" instead of an rvalue reference.
+ const key_type& k = mExtractKey(value);
+ return DoInsertValueExtra(has_unique_keys_type(), k, c, pNodeNew, eastl::move(value));
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(const_iterator, value_type&& value)
+ {
+ // We currently ignore the iterator argument as a hint.
+ insert_return_type result = DoInsertValue(has_unique_keys_type(), value_type(eastl::move(value)));
+ return DoGetResultIterator(has_unique_keys_type(), result);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(const value_type& value)
+ {
+ return DoInsertValue(has_unique_keys_type(), value);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(hash_code_t c, node_type* pNodeNew, const value_type& value)
+ {
+ // pNodeNew->mValue is expected to be uninitialized.
+ const key_type& k = mExtractKey(value);
+ return DoInsertValueExtra(has_unique_keys_type(), k, c, pNodeNew, value);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename P, class>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(P&& otherValue)
+ {
+ return emplace(eastl::forward<P>(otherValue));
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(const_iterator, const value_type& value)
+ {
+ // We ignore the first argument (hint iterator). It's not likely to be useful for hashtable containers.
+ insert_return_type result = DoInsertValue(has_unique_keys_type(), value);
+ return DoGetResultIterator(has_unique_keys_type(), result);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(std::initializer_list<value_type> ilist)
+ {
+ insert(ilist.begin(), ilist.end());
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <typename InputIterator>
+ void
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(InputIterator first, InputIterator last)
+ {
+ const uint32_t nElementAdd = (uint32_t)eastl::ht_distance(first, last);
+ const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, nElementAdd);
+
+ if(bRehash.first)
+ DoRehash(bRehash.second);
+
+ for(; first != last; ++first)
+ DoInsertValue(has_unique_keys_type(), *first);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class M>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_or_assign(const key_type& k, M&& obj)
+ {
+ auto iter = find(k);
+ if(iter == end())
+ {
+ return insert(value_type(piecewise_construct, eastl::forward_as_tuple(k), eastl::forward_as_tuple(eastl::forward<M>(obj))));
+ }
+ else
+ {
+ iter->second = eastl::forward<M>(obj);
+ return {iter, false};
+ }
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class M>
+ eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_or_assign(key_type&& k, M&& obj)
+ {
+ auto iter = find(k);
+ if(iter == end())
+ {
+ return insert(value_type(piecewise_construct, eastl::forward_as_tuple(eastl::move(k)), eastl::forward_as_tuple(eastl::forward<M>(obj))));
+ }
+ else
+ {
+ iter->second = eastl::forward<M>(obj);
+ return {iter, false};
+ }
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class M>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_or_assign(const_iterator, const key_type& k, M&& obj)
+ {
+ return insert_or_assign(k, eastl::forward<M>(obj)).first; // we ignore the iterator hint
+ }
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ template <class M>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_or_assign(const_iterator, key_type&& k, M&& obj)
+ {
+ return insert_or_assign(eastl::move(k), eastl::forward<M>(obj)).first; // we ignore the iterator hint
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(const_iterator i)
+ {
+ iterator iNext(i.mpNode, i.mpBucket); // Convert from const_iterator to iterator while constructing.
+ ++iNext;
+
+ node_type* pNode = i.mpNode;
+ node_type* pNodeCurrent = *i.mpBucket;
+
+ if(pNodeCurrent == pNode)
+ *i.mpBucket = pNodeCurrent->mpNext;
+ else
+ {
+ // We have a singly-linked list, so we have no choice but to
+ // walk down it till we find the node before the node at 'i'.
+ node_type* pNodeNext = pNodeCurrent->mpNext;
+
+ while(pNodeNext != pNode)
+ {
+ pNodeCurrent = pNodeNext;
+ pNodeNext = pNodeCurrent->mpNext;
+ }
+
+ pNodeCurrent->mpNext = pNodeNext->mpNext;
+ }
+
+ DoFreeNode(pNode);
+ --mnElementCount;
+
+ return iNext;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(const_iterator first, const_iterator last)
+ {
+ while(first != last)
+ first = erase(first);
+ return iterator(first.mpNode, first.mpBucket);
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::size_type
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(const key_type& k)
+ {
+ // To do: Reimplement this function to do a single loop and not try to be
+ // smart about element contiguity. The mechanism here is only a benefit if the
+ // buckets are heavily overloaded; otherwise this mechanism may be slightly slower.
+
+ const hash_code_t c = get_hash_code(k);
+ const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+ const size_type nElementCountSaved = mnElementCount;
+
+ node_type** pBucketArray = mpBucketArray + n;
+
+ while(*pBucketArray && !compare(k, c, *pBucketArray))
+ pBucketArray = &(*pBucketArray)->mpNext;
+
+ while(*pBucketArray && compare(k, c, *pBucketArray))
+ {
+ node_type* const pNode = *pBucketArray;
+ *pBucketArray = pNode->mpNext;
+ DoFreeNode(pNode);
+ --mnElementCount;
+ }
+
+ return nElementCountSaved - mnElementCount;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::clear()
+ {
+ DoFreeNodes(mpBucketArray, mnBucketCount);
+ mnElementCount = 0;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::clear(bool clearBuckets)
+ {
+ DoFreeNodes(mpBucketArray, mnBucketCount);
+ if(clearBuckets)
+ {
+ DoFreeBuckets(mpBucketArray, mnBucketCount);
+ reset_lose_memory();
+ }
+ mnElementCount = 0;
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::reset_lose_memory() EA_NOEXCEPT
+ {
+ // The reset function is a special extension function which unilaterally
+ // resets the container to an empty state without freeing the memory of
+ // the contained objects. This is useful for very quickly tearing down a
+ // container built into scratch memory.
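+ // For example (an illustrative sketch; 'hashTable' is a hypothetical name): a table whose nodes
+ // were allocated from a scratch/linear arena can be abandoned with hashTable.reset_lose_memory(),
+ // with the arena itself being reset separately.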
+ mnBucketCount = 1;
+
+ #ifdef _MSC_VER
+ mpBucketArray = (node_type**)&gpEmptyBucketArray[0];
+ #else
+ void* p = &gpEmptyBucketArray[0];
+ memcpy(&mpBucketArray, &p, sizeof(mpBucketArray)); // Other compilers implement strict aliasing and casting is thus unsafe.
+ #endif
+
+ mnElementCount = 0;
+ mRehashPolicy.mnNextResize = 0;
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::reserve(size_type nElementCount)
+ {
+ rehash(mRehashPolicy.GetBucketCount(uint32_t(nElementCount)));
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::rehash(size_type nBucketCount)
+ {
+ // Note that we unilaterally use the passed-in bucket count; we do not attempt to migrate it
+ // up to the next prime number. We leave it to the user's discretion to do such a thing.
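+ // For example (an illustrative sketch; the names are hypothetical), a caller that wants a
+ // prime bucket count simply passes one it chose itself: hashTable.rehash(257);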
+ DoRehash(nBucketCount);
+ }
+
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoRehash(size_type nNewBucketCount)
+ {
+ node_type** const pBucketArray = DoAllocateBuckets(nNewBucketCount); // nNewBucketCount should always be >= 2.
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ node_type* pNode;
+
+ for(size_type i = 0; i < mnBucketCount; ++i)
+ {
+ while((pNode = mpBucketArray[i]) != NULL) // Using '!=' disables compiler warnings.
+ {
+ const size_type nNewBucketIndex = (size_type)bucket_index(pNode, (uint32_t)nNewBucketCount);
+
+ mpBucketArray[i] = pNode->mpNext;
+ pNode->mpNext = pBucketArray[nNewBucketIndex];
+ pBucketArray[nNewBucketIndex] = pNode;
+ }
+ }
+
+ DoFreeBuckets(mpBucketArray, mnBucketCount);
+ mnBucketCount = nNewBucketCount;
+ mpBucketArray = pBucketArray;
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ // A failure here means that a hash function threw an exception.
+ // We can't restore the previous state without calling the hash
+ // function again, so the only sensible recovery is to delete everything.
+ DoFreeNodes(pBucketArray, nNewBucketCount);
+ DoFreeBuckets(pBucketArray, nNewBucketCount);
+ DoFreeNodes(mpBucketArray, mnBucketCount);
+ mnElementCount = 0;
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline bool hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::validate() const
+ {
+ // Verify our empty bucket array is unmodified.
+ if(gpEmptyBucketArray[0] != NULL)
+ return false;
+
+ if(gpEmptyBucketArray[1] != (void*)uintptr_t(~0))
+ return false;
+
+ // Verify that we have at least one bucket. Calculations can
+ // trigger division by zero exceptions otherwise.
+ if(mnBucketCount == 0)
+ return false;
+
+ // Verify that gpEmptyBucketArray is used correctly.
+ // gpEmptyBucketArray is only used when initially empty.
+ if((void**)mpBucketArray == &gpEmptyBucketArray[0])
+ {
+ if(mnElementCount) // gpEmptyBucketArray is used only for empty hash tables.
+ return false;
+
+ if(mnBucketCount != 1) // gpEmptyBucketArray is used exactly and only for mnBucketCount == 1.
+ return false;
+ }
+ else
+ {
+ if(mnBucketCount < 2) // Small bucket counts *must* use gpEmptyBucketArray.
+ return false;
+ }
+
+ // Verify that the element count matches mnElementCount.
+ size_type nElementCount = 0;
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ ++nElementCount;
+
+ if(nElementCount != mnElementCount)
+ return false;
+
+ // To do: Verify that individual elements are in the expected buckets.
+
+ return true;
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ int hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::validate_iterator(const_iterator i) const
+ {
+ // To do: Come up with a more efficient mechanism of doing this.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ // operator==, != have been moved to the specific container subclasses (e.g. hash_map).
+
+ // The following comparison operators are deprecated and will likely be removed in a
+ // future version of this package.
+ //
+ // Comparing hash tables for less-ness is an odd thing to do. We provide it for
+ // completeness, though the user is advised to be wary of how they use this.
+ //
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline bool operator<(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+ const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+ {
+ // This requires hash table elements to support operator<. Since the hash table
+ // doesn't compare elements via less (it does so via equals), we must use the
+ // globally defined operator less for the elements.
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline bool operator>(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+ const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline bool operator<=(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+ const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline bool operator>=(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+ const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename K, typename V, typename A, typename EK, typename Eq,
+ typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+ inline void swap(hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+ hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/include/EASTL/internal/in_place_t.h b/include/EASTL/internal/in_place_t.h
new file mode 100644
index 0000000..79acd18
--- /dev/null
+++ b/include/EASTL/internal/in_place_t.h
@@ -0,0 +1,82 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_IN_PLACE_T_H
+#define EASTL_INTERNAL_IN_PLACE_T_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+namespace eastl
+{
+ namespace Internal
+ {
+ struct in_place_tag {};
+ template <class> struct in_place_type_tag {};
+ template <size_t> struct in_place_index_tag {};
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// in_place_tag
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/in_place_tag
+ ///
+ struct in_place_tag
+ {
+ in_place_tag() = delete;
+
+ private:
+ explicit in_place_tag(Internal::in_place_tag) {}
+ friend inline in_place_tag Internal_ConstructInPlaceTag();
+ };
+
+ // internal factory function for in_place_tag
+ inline in_place_tag Internal_ConstructInPlaceTag() { return in_place_tag(Internal::in_place_tag{}); }
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// in_place_t / in_place_type_t / in_place_index_t
+ ///
+ /// used to disambiguate overloads that take arguments (possibly a parameter
+ /// pack) for in-place construction of some value.
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/optional/in_place_t
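+ ///
+ /// Example usage (an illustrative sketch; it assumes a type such as eastl::optional whose
+ /// constructor takes an in_place tag to construct the contained value in place):
+ ///     eastl::optional<eastl::string> s(eastl::in_place, "hello");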
+ ///
+ using in_place_t = in_place_tag(&)(Internal::in_place_tag);
+
+ template <class T>
+ using in_place_type_t = in_place_tag(&)(Internal::in_place_type_tag<T>);
+
+ template <size_t N>
+ using in_place_index_t = in_place_tag(&)(Internal::in_place_index_tag<N>);
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// in_place / in_place<T> / in_place<size_t>
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/in_place
+ ///
+ inline in_place_tag in_place(Internal::in_place_tag) { return Internal_ConstructInPlaceTag(); }
+
+ template <class T>
+ inline in_place_tag in_place(Internal::in_place_type_tag<T>) { return Internal_ConstructInPlaceTag(); }
+
+ template <std::size_t I>
+ inline in_place_tag in_place(Internal::in_place_index_tag<I>) { return Internal_ConstructInPlaceTag(); }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
diff --git a/include/EASTL/internal/integer_sequence.h b/include/EASTL/internal/integer_sequence.h
new file mode 100644
index 0000000..88cf1b1
--- /dev/null
+++ b/include/EASTL/internal/integer_sequence.h
@@ -0,0 +1,74 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_INTEGER_SEQUENCE_H
+#define EASTL_INTEGER_SEQUENCE_H
+
+#include <EABase/config/eacompiler.h>
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+namespace eastl
+{
+
+#if EASTL_VARIADIC_TEMPLATES_ENABLED && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+
+// integer_sequence
+template <typename T, T... Ints>
+class integer_sequence
+{
+public:
+ typedef T value_type;
+ static_assert(is_integral<T>::value, "eastl::integer_sequence can only be instantiated with an integral type");
+ static EA_CONSTEXPR size_t size() EA_NOEXCEPT { return sizeof...(Ints); }
+};
+
+template <size_t N, typename IndexSeq>
+struct make_index_sequence_impl;
+
+template <size_t N, size_t... Is>
+struct make_index_sequence_impl<N, integer_sequence<size_t, Is...>>
+{
+ typedef typename make_index_sequence_impl<N - 1, integer_sequence<size_t, N - 1, Is...>>::type type;
+};
+
+template <size_t... Is>
+struct make_index_sequence_impl<0, integer_sequence<size_t, Is...>>
+{
+ typedef integer_sequence<size_t, Is...> type;
+};
+
+template <size_t... Is>
+using index_sequence = integer_sequence<size_t, Is...>;
+
+template <size_t N>
+using make_index_sequence = typename make_index_sequence_impl<N, integer_sequence<size_t>>::type;
+
+template <typename Target, typename Seq>
+struct integer_sequence_convert_impl;
+
+template <typename Target, size_t... Is>
+struct integer_sequence_convert_impl<Target, integer_sequence<size_t, Is...>>
+{
+ typedef integer_sequence<Target, Is...> type;
+};
+
+template <typename T, size_t N>
+struct make_integer_sequence_impl
+{
+ typedef typename integer_sequence_convert_impl<T, make_index_sequence<N>>::type type;
+};
+
+template <typename T, size_t N>
+using make_integer_sequence = typename make_integer_sequence_impl<T, N>::type;
+
+// Helper alias template that converts any type parameter pack into an index sequence of the same length
+template<typename... T>
+using index_sequence_for = make_index_sequence<sizeof...(T)>;
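+
+// Example usage (an illustrative sketch; apply_impl is a hypothetical helper): an index sequence
+// is typically expanded positionally, e.g. to call a function with the elements of a tuple:
+//     template <typename F, typename Tuple, size_t... Is>
+//     auto apply_impl(F f, Tuple& t, index_sequence<Is...>) -> decltype(f(get<Is>(t)...))
+//         { return f(get<Is>(t)...); }
+//     // e.g. apply_impl(f, t, make_index_sequence<3>()) calls f(get<0>(t), get<1>(t), get<2>(t)).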
+
+#endif // EASTL_VARIADIC_TEMPLATES_ENABLED
+
+} // namespace eastl
+
+#endif // EASTL_INTEGER_SEQUENCE_H
diff --git a/include/EASTL/internal/intrusive_hashtable.h b/include/EASTL/internal/intrusive_hashtable.h
new file mode 100644
index 0000000..dccca5b
--- /dev/null
+++ b/include/EASTL/internal/intrusive_hashtable.h
@@ -0,0 +1,989 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements an intrusive hash table, which is a hash table whereby
+// the container nodes are the hash table objects themselves. This has benefits
+// primarily in terms of memory management. There are some minor limitations
+// that result from this.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_INTERNAL_INTRUSIVE_HASHTABLE_H
+#define EASTL_INTERNAL_INTRUSIVE_HASHTABLE_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/hashtable.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+#include <EASTL/algorithm.h>
+
+EA_DISABLE_ALL_VC_WARNINGS();
+#include <new>
+#include <stddef.h>
+#include <string.h>
+EA_RESTORE_ALL_VC_WARNINGS();
+
+
+namespace eastl
+{
+
+ /// intrusive_hash_node
+ ///
+ /// A hash_node stores an element in a hash table, much like a
+ /// linked list node stores an element in a linked list.
+ /// An intrusive_hash_node additionally can, via template parameter,
+ /// store a hash code in the node to speed up hash calculations
+ /// and comparisons in some cases.
+ ///
+ /// To consider: Make a version of intrusive_hash_node which is
+ /// templated on the container type. This would allow for the
+ /// mpNext pointer to be the container itself and thus allow
+ /// for easier debugging.
+ ///
+ /// Example usage:
+ /// struct Widget : public intrusive_hash_node{ ... };
+ ///
+ /// struct Dagget : public intrusive_hash_node_key<int>{ ... };
+ ///
+ struct intrusive_hash_node
+ {
+ intrusive_hash_node* mpNext;
+ };
+
+
+ template <typename Key>
+ struct intrusive_hash_node_key : public intrusive_hash_node
+ {
+ typedef Key key_type;
+ Key mKey;
+ };
+
+
+
+ /// intrusive_node_iterator
+ ///
+ /// Node iterators iterate nodes within a given bucket.
+ ///
+ /// The bConst parameter defines if the iterator is a const_iterator
+ /// or an iterator.
+ ///
+ template <typename Value, bool bConst>
+ struct intrusive_node_iterator
+ {
+ public:
+ typedef intrusive_node_iterator<Value, bConst> this_type;
+ typedef Value value_type;
+ typedef Value node_type;
+ typedef ptrdiff_t difference_type;
+ typedef typename type_select<bConst, const Value*, Value*>::type pointer;
+ typedef typename type_select<bConst, const Value&, Value&>::type reference;
+ typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
+
+ public:
+ node_type* mpNode;
+
+ public:
+ intrusive_node_iterator()
+ : mpNode(NULL) { }
+
+ explicit intrusive_node_iterator(value_type* pNode)
+ : mpNode(pNode) { }
+
+ intrusive_node_iterator(const intrusive_node_iterator<Value, true>& x)
+ : mpNode(x.mpNode) { }
+
+ reference operator*() const
+ { return *mpNode; }
+
+ pointer operator->() const
+ { return mpNode; }
+
+ this_type& operator++()
+ { mpNode = static_cast<node_type*>(mpNode->mpNext); return *this; }
+
+ this_type operator++(int)
+ { this_type temp(*this); mpNode = static_cast<node_type*>(mpNode->mpNext); return temp; }
+
+ }; // intrusive_node_iterator
+
+
+
+
+ /// intrusive_hashtable_iterator_base
+ ///
+ /// An intrusive_hashtable_iterator_base iterates the entire hash table and
+ /// not just nodes within a single bucket. Users in general will use a hash
+ /// table iterator much more often, as it is much like other container
+ /// iterators (e.g. vector::iterator).
+ ///
+ /// We define a base class here because it is shared by both const and
+ /// non-const iterators.
+ ///
+ template <typename Value>
+ struct intrusive_hashtable_iterator_base
+ {
+ public:
+ typedef Value value_type;
+
+ protected:
+ template <typename, typename, typename, typename, size_t, bool, bool>
+ friend class intrusive_hashtable;
+
+ template <typename, bool>
+ friend struct intrusive_hashtable_iterator;
+
+ template <typename V>
+ friend bool operator==(const intrusive_hashtable_iterator_base<V>&, const intrusive_hashtable_iterator_base<V>&);
+
+ template <typename V>
+ friend bool operator!=(const intrusive_hashtable_iterator_base<V>&, const intrusive_hashtable_iterator_base<V>&);
+
+ value_type* mpNode; // Current node within current bucket.
+ value_type** mpBucket; // Current bucket.
+
+ public:
+ intrusive_hashtable_iterator_base(value_type* pNode, value_type** pBucket)
+ : mpNode(pNode), mpBucket(pBucket) { }
+
+ void increment_bucket()
+ {
+ ++mpBucket;
+ while(*mpBucket == NULL) // We store an extra bucket with some non-NULL value at the end
+ ++mpBucket; // of the bucket array so that finding the end of the bucket
+ mpNode = *mpBucket; // array is quick and simple.
+ }
+
+ void increment()
+ {
+ mpNode = static_cast<value_type*>(mpNode->mpNext);
+
+ while(mpNode == NULL)
+ mpNode = *++mpBucket;
+ }
+
+ }; // intrusive_hashtable_iterator_base
+
+
+
+
+ /// intrusive_hashtable_iterator
+ ///
+ /// An intrusive_hashtable_iterator iterates the entire hash table and not
+ /// just nodes within a single bucket. Users in general will use a hash
+ /// table iterator much more often, as it is much like other container
+ /// iterators (e.g. vector::iterator).
+ ///
+ /// The bConst parameter defines if the iterator is a const_iterator
+ /// or an iterator.
+ ///
+ template <typename Value, bool bConst>
+ struct intrusive_hashtable_iterator : public intrusive_hashtable_iterator_base<Value>
+ {
+ public:
+ typedef intrusive_hashtable_iterator_base<Value> base_type;
+ typedef intrusive_hashtable_iterator<Value, bConst> this_type;
+ typedef intrusive_hashtable_iterator<Value, false> this_type_non_const;
+ typedef typename base_type::value_type value_type;
+ typedef typename type_select<bConst, const Value*, Value*>::type pointer;
+ typedef typename type_select<bConst, const Value&, Value&>::type reference;
+ typedef ptrdiff_t difference_type;
+ typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
+
+ public:
+ intrusive_hashtable_iterator()
+ : base_type(NULL, NULL) { }
+
+ explicit intrusive_hashtable_iterator(value_type* pNode, value_type** pBucket)
+ : base_type(pNode, pBucket) { }
+
+ explicit intrusive_hashtable_iterator(value_type** pBucket)
+ : base_type(*pBucket, pBucket) { }
+
+ intrusive_hashtable_iterator(const this_type_non_const& x)
+ : base_type(x.mpNode, x.mpBucket) { }
+
+ reference operator*() const
+ { return *base_type::mpNode; }
+
+ pointer operator->() const
+ { return base_type::mpNode; }
+
+ this_type& operator++()
+ { base_type::increment(); return *this; }
+
+ this_type operator++(int)
+ { this_type temp(*this); base_type::increment(); return temp; }
+
+ }; // intrusive_hashtable_iterator
+
+
+
+ /// use_intrusive_key
+ ///
+ /// operator()(x) returns x.mKey. Used in maps, as opposed to sets.
+ /// This is a template policy implementation; it is an alternative to
+ /// the use_self template implementation, which is used for sets.
+ ///
+ template <typename Node, typename Key>
+ struct use_intrusive_key // : public unary_function<T, T> // Perhaps we want to make it a subclass of unary_function.
+ {
+ typedef Key result_type;
+
+ const result_type& operator()(const Node& x) const
+ { return x.mKey; }
+ };
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// intrusive_hashtable
+ ///
+ template <typename Key, typename Value, typename Hash, typename Equal,
+ size_t bucketCount, bool bConstIterators, bool bUniqueKeys>
+ class intrusive_hashtable
+ {
+ public:
+ typedef intrusive_hashtable<Key, Value, Hash, Equal,
+ bucketCount, bConstIterators, bUniqueKeys> this_type;
+ typedef Key key_type;
+ typedef Value value_type;
+ typedef Value mapped_type;
+ typedef Value node_type;
+ typedef uint32_t hash_code_t;
+ typedef Equal key_equal;
+ typedef ptrdiff_t difference_type;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef intrusive_node_iterator<value_type, bConstIterators> local_iterator;
+ typedef intrusive_node_iterator<value_type, true> const_local_iterator;
+ typedef intrusive_hashtable_iterator<value_type, bConstIterators> iterator;
+ typedef intrusive_hashtable_iterator<value_type, true> const_iterator;
+ typedef typename type_select<bUniqueKeys, pair<iterator, bool>, iterator>::type insert_return_type;
+ typedef typename type_select<bConstIterators, eastl::use_self<Value>,
+ eastl::use_intrusive_key<Value, key_type> >::type extract_key;
+
+ enum
+ {
+ kBucketCount = bucketCount
+ };
+
+ protected:
+ node_type* mBucketArray[kBucketCount + 1]; // '+1' because we have an end bucket which is non-NULL so iterators always stop on it.
+ size_type mnElementCount;
+ Hash mHash; // To do: Use base class optimization to make this go away when it is of zero size.
+ Equal mEqual; // To do: Use base class optimization to make this go away when it is of zero size.
+
+ public:
+ intrusive_hashtable(const Hash&, const Equal&);
+
+ void swap(this_type& x);
+
+ iterator begin() EA_NOEXCEPT
+ {
+ iterator i(mBucketArray);
+ if(!i.mpNode)
+ i.increment_bucket();
+ return i;
+ }
+
+ const_iterator begin() const EA_NOEXCEPT
+ {
+ const_iterator i(const_cast<node_type**>(mBucketArray));
+ if(!i.mpNode)
+ i.increment_bucket();
+ return i;
+ }
+
+ const_iterator cbegin() const EA_NOEXCEPT
+ {
+ return begin();
+ }
+
+ iterator end() EA_NOEXCEPT
+ { return iterator(mBucketArray + kBucketCount); }
+
+ const_iterator end() const EA_NOEXCEPT
+ { return const_iterator(const_cast<node_type**>(mBucketArray) + kBucketCount); }
+
+ const_iterator cend() const EA_NOEXCEPT
+ { return const_iterator(const_cast<node_type**>(mBucketArray) + kBucketCount); }
+
+ local_iterator begin(size_type n) EA_NOEXCEPT
+ { return local_iterator(mBucketArray[n]); }
+
+ const_local_iterator begin(size_type n) const EA_NOEXCEPT
+ { return const_local_iterator(mBucketArray[n]); }
+
+ const_local_iterator cbegin(size_type n) const EA_NOEXCEPT
+ { return const_local_iterator(mBucketArray[n]); }
+
+ local_iterator end(size_type) EA_NOEXCEPT
+ { return local_iterator(NULL); }
+
+ const_local_iterator end(size_type) const EA_NOEXCEPT
+ { return const_local_iterator(NULL); }
+
+ const_local_iterator cend(size_type) const EA_NOEXCEPT
+ { return const_local_iterator(NULL); }
+
+ size_type size() const EA_NOEXCEPT
+ { return mnElementCount; }
+
+ bool empty() const EA_NOEXCEPT
+ { return mnElementCount == 0; }
+
+ size_type bucket_count() const EA_NOEXCEPT // This function is unnecessary, as the user can directly reference
+ { return kBucketCount; } // intrusive_hashtable::kBucketCount as a constant.
+
+ size_type bucket_size(size_type n) const EA_NOEXCEPT
+ { return (size_type)eastl::distance(begin(n), end(n)); }
+
+ size_type bucket(const key_type& k) const EA_NOEXCEPT
+ { return (size_type)(mHash(k) % kBucketCount); }
+
+ public:
+ float load_factor() const EA_NOEXCEPT
+ { return (float)mnElementCount / (float)kBucketCount; }
+
+ public:
+ insert_return_type insert(value_type& value)
+ { return DoInsertValue(value, integral_constant<bool, bUniqueKeys>()); }
+
+ insert_return_type insert(const_iterator, value_type& value)
+ { return insert(value); } // To consider: We might be able to use the iterator argument to specify a specific insertion location.
+
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last);
+
+ public:
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ size_type erase(const key_type& k);
+ iterator remove(value_type& value); // Removes by value instead of by iterator. This is an O(1) operation, due to this hashtable being 'intrusive'.
+
+ void clear();
+
+ public:
+ iterator find(const key_type& k);
+ const_iterator find(const key_type& k) const;
+
+ /// Implements a find whereby the user supplies a comparison of a different type
+ /// than the hashtable value_type. A useful case of this is one whereby you have
+ /// a container of string objects but want to do searches via passing in char pointers.
+ /// The problem is that without this kind of find, you need to do the expensive operation
+ /// of converting the char pointer to a string so it can be used as the argument to the
+ /// find function.
+ ///
+ /// Example usage:
+ /// hash_set<string> hashSet;
+ /// hashSet.find_as("hello"); // Use default hash and compare.
+ ///
+ /// Example usage (namespaces omitted for brevity):
+ /// hash_set<string> hashSet;
+ /// hashSet.find_as("hello", hash<char*>(), equal_to_2<string, char*>());
+ ///
+ template <typename U, typename UHash, typename BinaryPredicate>
+ iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate);
+
+ template <typename U, typename UHash, typename BinaryPredicate>
+ const_iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate) const;
+
+ template <typename U>
+ iterator find_as(const U& u);
+
+ template <typename U>
+ const_iterator find_as(const U& u) const;
+
+ size_type count(const key_type& k) const;
+
+ // The use for equal_range in a hash_table seems somewhat questionable.
+ // The primary reason for its existence is to replicate the interface of set/map.
+ eastl::pair<iterator, iterator> equal_range(const key_type& k);
+ eastl::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+
+ public:
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ public:
+ Hash hash_function() const
+ { return mHash; }
+
+ Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
+ { return mEqual; } // has specified in its hashtable (unordered_*) proposal.
+
+ const key_equal& key_eq() const
+ { return mEqual; }
+
+ key_equal& key_eq()
+ { return mEqual; }
+
+ protected:
+ eastl::pair<iterator, bool> DoInsertValue(value_type&, true_type); // true_type means bUniqueKeys is true.
+ iterator DoInsertValue(value_type&, false_type); // false_type means bUniqueKeys is false.
+
+ node_type* DoFindNode(node_type* pNode, const key_type& k) const;
+
+ template <typename U, typename BinaryPredicate>
+ node_type* DoFindNode(node_type* pNode, const U& u, BinaryPredicate predicate) const;
+
+ }; // class intrusive_hashtable
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // node_iterator_base
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value, bool bConst>
+ inline bool operator==(const intrusive_node_iterator<Value, bConst>& a,
+ const intrusive_node_iterator<Value, bConst>& b)
+ { return a.mpNode == b.mpNode; }
+
+ template <typename Value, bool bConst>
+ inline bool operator!=(const intrusive_node_iterator<Value, bConst>& a,
+ const intrusive_node_iterator<Value, bConst>& b)
+ { return a.mpNode != b.mpNode; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // hashtable_iterator_base
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Value>
+ inline bool operator==(const intrusive_hashtable_iterator_base<Value>& a,
+ const intrusive_hashtable_iterator_base<Value>& b)
+ { return a.mpNode == b.mpNode; }
+
+
+ template <typename Value>
+ inline bool operator!=(const intrusive_hashtable_iterator_base<Value>& a,
+ const intrusive_hashtable_iterator_base<Value>& b)
+ { return a.mpNode != b.mpNode; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // intrusive_hashtable
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::intrusive_hashtable(const H& h, const Eq& eq)
+ : mnElementCount(0),
+ mHash(h),
+ mEqual(eq)
+ {
+ memset(mBucketArray, 0, kBucketCount * sizeof(mBucketArray[0]));
+ mBucketArray[kBucketCount] = reinterpret_cast<node_type*>((uintptr_t)~0);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ void intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::swap(this_type& x)
+ {
+ for(size_t i = 0; i < kBucketCount; i++)
+ eastl::swap(mBucketArray[i], x.mBucketArray[i]);
+
+ eastl::swap(mnElementCount, x.mnElementCount);
+ eastl::swap(mHash, x.mHash);
+ eastl::swap(mEqual, x.mEqual);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::find(const key_type& k)
+ {
+ const size_type n = (size_type)(mHash(k) % kBucketCount);
+ node_type* const pNode = DoFindNode(mBucketArray[n], k);
+ return pNode ? iterator(pNode, mBucketArray + n) : iterator(mBucketArray + kBucketCount);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::const_iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::find(const key_type& k) const
+ {
+ const size_type n = (size_type)(mHash(k) % kBucketCount);
+ node_type* const pNode = DoFindNode(mBucketArray[n], k);
+ return pNode ? const_iterator(pNode, const_cast<node_type**>(mBucketArray) + n) : const_iterator(const_cast<node_type**>(mBucketArray) + kBucketCount);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ template <typename U, typename UHash, typename BinaryPredicate>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::find_as(const U& other, UHash uhash, BinaryPredicate predicate)
+ {
+ const size_type n = (size_type)(uhash(other) % kBucketCount);
+ node_type* const pNode = DoFindNode(mBucketArray[n], other, predicate);
+ return pNode ? iterator(pNode, mBucketArray + n) : iterator(mBucketArray + kBucketCount);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ template <typename U, typename UHash, typename BinaryPredicate>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::const_iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::find_as(const U& other, UHash uhash, BinaryPredicate predicate) const
+ {
+ const size_type n = (size_type)(uhash(other) % kBucketCount);
+ node_type* const pNode = DoFindNode(mBucketArray[n], other, predicate);
+ return pNode ? const_iterator(pNode, const_cast<node_type**>(mBucketArray) + n) : const_iterator(const_cast<node_type**>(mBucketArray) + kBucketCount);
+ }
+
+
+ /// intrusive_hashtable_find
+ ///
+ /// Helper function that defaults to using hash<U> and equal_to_2<T, U>.
+ /// This makes it so that by default you don't need to provide these.
+ /// Note that the default hash functions may not be what you want, though.
+ ///
+ /// Example usage. Instead of this:
+ /// hash_set<string> hashSet;
+ /// hashSet.find("hello", hash<char*>(), equal_to_2<string, char*>());
+ ///
+ /// You can use this:
+ /// hash_set<string> hashSet;
+ /// hashtable_find(hashSet, "hello");
+ ///
+ template <typename H, typename U>
+ inline typename H::iterator intrusive_hashtable_find(H& hashTable, const U& u)
+ { return hashTable.find_as(u, eastl::hash<U>(), eastl::equal_to_2<const typename H::key_type, U>()); }
+
+ template <typename H, typename U>
+ inline typename H::const_iterator intrusive_hashtable_find(const H& hashTable, const U& u)
+ { return hashTable.find_as(u, eastl::hash<U>(), eastl::equal_to_2<const typename H::key_type, U>()); }
+
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ template <typename U>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::find_as(const U& other)
+ { return eastl::intrusive_hashtable_find(*this, other); }
+ // VC++ doesn't appear to like the following, though it seems correct to me.
+ // So we implement the workaround above until we can straighten this out.
+ //{ return find_as(other, eastl::hash<U>(), eastl::equal_to_2<const key_type, U>()); }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ template <typename U>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::const_iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::find_as(const U& other) const
+ { return eastl::intrusive_hashtable_find(*this, other); }
+ // VC++ doesn't appear to like the following, though it seems correct to me.
+ // So we implement the workaround above until we can straighten this out.
+ //{ return find_as(other, eastl::hash<U>(), eastl::equal_to_2<const key_type, U>()); }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::size_type
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::count(const key_type& k) const
+ {
+ const size_type n = (size_type)(mHash(k) % kBucketCount);
+ size_type result = 0;
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+
+ // To do: Make a specialization for bU (unique keys) == true and take
+ // advantage of the fact that the count will always be zero or one in that case.
+ for(node_type* pNode = mBucketArray[n]; pNode; pNode = static_cast<node_type*>(pNode->mpNext))
+ {
+ if(mEqual(k, extractKey(*pNode)))
+ ++result;
+ }
+ return result;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ eastl::pair<typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator,
+ typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator>
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::equal_range(const key_type& k)
+ {
+ const size_type n = (size_type)(mHash(k) % kBucketCount);
+ node_type** head = mBucketArray + n;
+ node_type* pNode = DoFindNode(*head, k);
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+
+ if(pNode)
+ {
+ node_type* p1 = static_cast<node_type*>(pNode->mpNext);
+
+ for(; p1; p1 = static_cast<node_type*>(p1->mpNext))
+ {
+ if(!mEqual(k, extractKey(*p1)))
+ break;
+ }
+
+ iterator first(pNode, head);
+ iterator last(p1, head);
+
+ if(!p1)
+ last.increment_bucket();
+
+ return eastl::pair<iterator, iterator>(first, last);
+ }
+
+ return eastl::pair<iterator, iterator>(iterator(mBucketArray + kBucketCount),
+ iterator(mBucketArray + kBucketCount));
+ }
+
+
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ eastl::pair<typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::const_iterator,
+ typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::const_iterator>
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::equal_range(const key_type& k) const
+ {
+ const size_type n = (size_type)(mHash(k) % kBucketCount);
+ node_type** head = const_cast<node_type**>(mBucketArray + n);
+ node_type* pNode = DoFindNode(*head, k);
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+
+ if(pNode)
+ {
+ node_type* p1 = static_cast<node_type*>(pNode->mpNext);
+
+ for(; p1; p1 = static_cast<node_type*>(p1->mpNext))
+ {
+ if(!mEqual(k, extractKey(*p1)))
+ break;
+ }
+
+ const_iterator first(pNode, head);
+ const_iterator last(p1, head);
+
+ if(!p1)
+ last.increment_bucket();
+
+ return eastl::pair<const_iterator, const_iterator>(first, last);
+ }
+
+ return eastl::pair<const_iterator, const_iterator>(const_iterator(const_cast<node_type**>(mBucketArray) + kBucketCount),
+ const_iterator(const_cast<node_type**>(mBucketArray) + kBucketCount));
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::node_type*
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::DoFindNode(node_type* pNode, const key_type& k) const
+ {
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+
+ for(; pNode; pNode = static_cast<node_type*>(pNode->mpNext))
+ {
+ if(mEqual(k, extractKey(*pNode)))
+ return pNode;
+ }
+ return NULL;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ template <typename U, typename BinaryPredicate>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::node_type*
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::DoFindNode(node_type* pNode, const U& other, BinaryPredicate predicate) const
+ {
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+
+ for(; pNode; pNode = static_cast<node_type*>(pNode->mpNext))
+ {
+ if(predicate(extractKey(*pNode), other)) // Intentionally compare with key as first arg and other as second arg.
+ return pNode;
+ }
+ return NULL;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ eastl::pair<typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator, bool>
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::DoInsertValue(value_type& value, true_type) // true_type means bUniqueKeys is true.
+ {
+ // For sets (as opposed to maps), one could argue that all insertions are successful,
+ // as all elements are unique. However, the equal function might not think so.
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+ const size_type n = (size_type)(mHash(extractKey(value)) % kBucketCount);
+ node_type* const pNode = DoFindNode(mBucketArray[n], extractKey(value));
+
+ if(pNode == NULL)
+ {
+ value.mpNext = mBucketArray[n];
+ mBucketArray[n] = &value;
+ ++mnElementCount;
+
+ return eastl::pair<iterator, bool>(iterator(&value, mBucketArray + n), true);
+ }
+
+ return eastl::pair<iterator, bool>(iterator(pNode, mBucketArray + n), false);
+ }
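+
+	// Illustrative note (hedged; 'table', 'nodeA' and 'nodeB' are placeholders): with unique
+	// keys, inserting a node whose key compares equal to an already-stored node's key does not
+	// link the new node; the returned pair's 'second' is false and its iterator refers to the
+	// pre-existing node.
+	//
+	//     auto r1 = table.insert(nodeA); // r1.second == true; nodeA is now linked into its bucket.
+	//     auto r2 = table.insert(nodeB); // If nodeB's key equals nodeA's: r2.second == false; nodeB stays unlinked.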
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::DoInsertValue(value_type& value, false_type) // false_type means bUniqueKeys is false.
+ {
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+ const size_type n = (size_type)(mHash(extractKey(value)) % kBucketCount);
+ node_type* const pNodePrev = DoFindNode(mBucketArray[n], extractKey(value));
+
+ if(pNodePrev == NULL)
+ {
+ value.mpNext = mBucketArray[n];
+ mBucketArray[n] = &value;
+ }
+ else
+ {
+ value.mpNext = pNodePrev->mpNext;
+ pNodePrev->mpNext = &value;
+ }
+
+ ++mnElementCount;
+
+ return iterator(&value, mBucketArray + n);
+ }
+
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ template <typename InputIterator>
+ inline void intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::insert(InputIterator first, InputIterator last)
+ {
+ for(; first != last; ++first)
+ insert(*first);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::erase(const_iterator i)
+ {
+ iterator iNext(i.mpNode, i.mpBucket);
+ ++iNext;
+
+ node_type* pNode = i.mpNode;
+ node_type* pNodeCurrent = *i.mpBucket;
+
+ if(pNodeCurrent == pNode)
+ *i.mpBucket = static_cast<node_type*>(pNodeCurrent->mpNext);
+ else
+ {
+ // We have a singly-linked list, so we have no choice but to
+ // walk down it till we find the node before the node at 'i'.
+ node_type* pNodeNext = static_cast<node_type*>(pNodeCurrent->mpNext);
+
+ while(pNodeNext != pNode)
+ {
+ pNodeCurrent = pNodeNext;
+ pNodeNext = static_cast<node_type*>(pNodeCurrent->mpNext);
+ }
+
+ pNodeCurrent->mpNext = static_cast<node_type*>(pNodeNext->mpNext);
+ }
+
+ // To consider: In debug builds set the node mpNext to NULL.
+ --mnElementCount;
+
+ return iNext;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::erase(const_iterator first, const_iterator last)
+ {
+ while(first != last)
+ first = erase(first);
+ return iterator(first.mpNode, first.mpBucket);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::size_type
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::erase(const key_type& k)
+ {
+ const size_type n = (size_type)(mHash(k) % kBucketCount);
+ const size_type nElementCountSaved = mnElementCount;
+ node_type*& pNodeBase = mBucketArray[n];
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+
+ // Note by Paul Pedriana:
+		// We have two loops here, and I'm not finding any easy way to have just one
+ // loop without changing the requirements of the hashtable node definition.
+ // It's a problem of taking an address of a variable and converting it to the
+ // address of another type without knowing what that type is. Perhaps I'm a
+ // little overly tired, so if there is a simple solution I am probably missing it.
+
+ while(pNodeBase && mEqual(k, extractKey(*pNodeBase)))
+ {
+ pNodeBase = static_cast<node_type*>(pNodeBase->mpNext);
+ --mnElementCount;
+ }
+
+ node_type* pNodePrev = pNodeBase;
+
+ if(pNodePrev)
+ {
+ node_type* pNodeCur;
+
+ while((pNodeCur = static_cast<node_type*>(pNodePrev->mpNext)) != NULL)
+ {
+ if(mEqual(k, extractKey(*pNodeCur)))
+ {
+ pNodePrev->mpNext = static_cast<node_type*>(pNodeCur->mpNext);
+ --mnElementCount; // To consider: In debug builds set the node mpNext to NULL.
+ }
+ else
+ pNodePrev = static_cast<node_type*>(pNodePrev->mpNext);
+ }
+ }
+
+ return nElementCountSaved - mnElementCount;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline typename intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::iterator
+ intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::remove(value_type& value)
+ {
+ extract_key extractKey; // extract_key is empty and thus this ctor is a no-op.
+ const size_type n = (size_type)(mHash(extractKey(value)) % kBucketCount);
+
+ return erase(iterator(&value, &mBucketArray[n]));
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline void intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::clear()
+ {
+ // To consider: In debug builds set the node mpNext to NULL.
+ memset(mBucketArray, 0, kBucketCount * sizeof(mBucketArray[0]));
+ mnElementCount = 0;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::validate() const
+ {
+ // Verify that the element count matches mnElementCount.
+ size_type nElementCount = 0;
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ ++nElementCount;
+
+ if(nElementCount != mnElementCount)
+ return false;
+
+ // To do: Verify that individual elements are in the expected buckets.
+
+ return true;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ int intrusive_hashtable<K, V, H, Eq, bC, bM, bU>::validate_iterator(const_iterator i) const
+ {
+ // To do: Come up with a more efficient mechanism of doing this.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
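+
+	// Debug-time usage sketch (hedged; 'ht' and 'it' are placeholders): the returned isf_*
+	// flag bits can be tested before dereferencing an iterator of uncertain provenance.
+	//
+	//     if((ht.validate_iterator(it) & isf_can_dereference) == 0)
+	//         { /* 'it' is end() or doesn't belong to 'ht'; don't dereference it. */ }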
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool operator==(const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+ const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ return (a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin());
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool operator!=(const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+ const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ return !(a == b);
+ }
+
+
+ // Comparing hash tables for less-ness is an odd thing to do. We provide it for
+ // completeness, though the user is advised to be wary of how they use this.
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool operator<(const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+ const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ // This requires hash table elements to support operator<. Since the hash table
+ // doesn't compare elements via less (it does so via equals), we must use the
+ // globally defined operator less for the elements.
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool operator>(const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+ const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool operator<=(const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+ const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+ inline bool operator>=(const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+ const intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename K, typename V, typename H, typename Eq, size_t bC, bool bM, bool bU>
+	inline void swap(intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& a,
+	                 intrusive_hashtable<K, V, H, Eq, bC, bM, bU>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+
+#endif // Header include guard
diff --git a/include/EASTL/internal/mem_fn.h b/include/EASTL/internal/mem_fn.h
new file mode 100644
index 0000000..1d3e7b3
--- /dev/null
+++ b/include/EASTL/internal/mem_fn.h
@@ -0,0 +1,304 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_MEM_FN_H
+#define EASTL_INTERNAL_MEM_FN_H
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+#pragma once
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+// The code in this file is a modification of the libcxx implementation. We copy
+// the license information here as required.
+//
+// We implement only enough of mem_fn to implement eastl::function.
+////////////////////////////////////////////////////////////////////////////////
+
+//===------------------------ functional ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+namespace eastl
+{
+ //
+ // apply_cv
+ //
+ template <class T, class U,
+ bool = is_const<typename remove_reference<T>::type>::value,
+ bool = is_volatile<typename remove_reference<T>::type>::value>
+ struct apply_cv { typedef U type; };
+
+ template <class T, class U> struct apply_cv<T, U, true, false> { typedef const U type; };
+ template <class T, class U> struct apply_cv<T, U, false, true> { typedef volatile U type; };
+ template <class T, class U> struct apply_cv<T, U, true, true> { typedef const volatile U type; };
+ template <class T, class U> struct apply_cv<T&, U, false, false> { typedef U& type; };
+ template <class T, class U> struct apply_cv<T&, U, true, false> { typedef const U& type; };
+ template <class T, class U> struct apply_cv<T&, U, false, true> { typedef volatile U& type; };
+ template <class T, class U> struct apply_cv<T&, U, true, true> { typedef const volatile U& type; };
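+
+	// For illustration (hedged): apply_cv transfers the cv-qualification and reference-ness
+	// of T onto U. Under the specializations above:
+	//
+	//     apply_cv<const int,  float>::type   is   const float
+	//     apply_cv<const int&, float>::type   is   const float&
+	//     apply_cv<int&,       float>::type   is   float&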
+
+
+
+ //
+ // has_result_type
+ //
+ template <class T>
+ struct has_result_type
+ {
+ private:
+ template <class U>
+ static eastl::no_type test(...);
+
+ template <class U>
+ static eastl::yes_type test(typename U::result_type* = 0);
+
+ public:
+ static const bool value = sizeof(test<T>(0)) == sizeof(eastl::yes_type);
+ };
+
+
+
+ //
+ // derives_from_unary_function
+ // derives_from_binary_function
+ //
+ template <class T>
+ struct derives_from_unary_function
+ {
+ private:
+ static eastl::no_type test(...);
+
+ template <class A, class R>
+ static unary_function<A, R> test(const volatile unary_function<A, R>*);
+
+ public:
+ static const bool value = !is_same<decltype(test((T*)0)), eastl::no_type>::value;
+ typedef decltype(test((T*)0)) type;
+ };
+
+ template <class T>
+ struct derives_from_binary_function
+ {
+ private:
+ static eastl::no_type test(...);
+ template <class A1, class A2, class R>
+ static binary_function<A1, A2, R> test(const volatile binary_function<A1, A2, R>*);
+
+ public:
+ static const bool value = !is_same<decltype(test((T*)0)), eastl::no_type>::value;
+ typedef decltype(test((T*)0)) type;
+ };
+
+
+
+ //
+	// maybe_derive_from_unary_function
+	// maybe_derive_from_binary_function
+ //
+ template <class T, bool = derives_from_unary_function<T>::value>
+ struct maybe_derive_from_unary_function // bool is true
+ : public derives_from_unary_function<T>::type { };
+
+ template <class T>
+ struct maybe_derive_from_unary_function<T, false> { };
+
+ template <class T, bool = derives_from_binary_function<T>::value>
+ struct maybe_derive_from_binary_function // bool is true
+ : public derives_from_binary_function<T>::type { };
+
+ template <class T>
+ struct maybe_derive_from_binary_function<T, false> { };
+
+
+
+ //
+ // weak_result_type_imp
+ //
+ template <class T, bool = has_result_type<T>::value>
+ struct weak_result_type_imp // bool is true
+ : public maybe_derive_from_unary_function<T>,
+ public maybe_derive_from_binary_function<T>
+ {
+ typedef typename T::result_type result_type;
+ };
+
+ template <class T>
+ struct weak_result_type_imp<T, false> : public maybe_derive_from_unary_function<T>,
+ public maybe_derive_from_binary_function<T> { };
+
+
+
+ //
+ // weak_result_type
+ //
+ template <class T>
+ struct weak_result_type : public weak_result_type_imp<T> { };
+
+ // 0 argument case
+ template <class R> struct weak_result_type<R()> { typedef R result_type; };
+ template <class R> struct weak_result_type<R(&)()> { typedef R result_type; };
+ template <class R> struct weak_result_type<R (*)()> { typedef R result_type; };
+
+ // 1 argument case
+ template <class R, class A1> struct weak_result_type<R(A1)> : public unary_function<A1, R> { };
+ template <class R, class A1> struct weak_result_type<R(&)(A1)> : public unary_function<A1, R> { };
+ template <class R, class A1> struct weak_result_type<R (*)(A1)> : public unary_function<A1, R> { };
+ template <class R, class C> struct weak_result_type<R (C::*)()> : public unary_function<C*, R> { };
+ template <class R, class C> struct weak_result_type<R (C::*)() const> : public unary_function<const C*, R> { };
+ template <class R, class C> struct weak_result_type<R (C::*)() volatile> : public unary_function<volatile C*, R> { };
+ template <class R, class C> struct weak_result_type<R (C::*)() const volatile> : public unary_function<const volatile C*, R> { };
+
+ // 2 argument case
+ template <class R, class A1, class A2> struct weak_result_type<R(A1, A2)> : public binary_function<A1, A2, R> { };
+ template <class R, class A1, class A2> struct weak_result_type<R (*)(A1, A2)> : public binary_function<A1, A2, R> { };
+ template <class R, class A1, class A2> struct weak_result_type<R(&)(A1, A2)> : public binary_function<A1, A2, R> { };
+ template <class R, class C, class A1> struct weak_result_type<R (C::*)(A1)> : public binary_function<C*, A1, R> { };
+ template <class R, class C, class A1> struct weak_result_type<R (C::*)(A1) const> : public binary_function<const C*, A1, R> { };
+ template <class R, class C, class A1> struct weak_result_type<R (C::*)(A1) volatile> : public binary_function<volatile C*, A1, R> { };
+ template <class R, class C, class A1> struct weak_result_type<R (C::*)(A1) const volatile> : public binary_function<const volatile C*, A1, R> { };
+
+ // 3 or more arguments
+#if EASTL_VARIADIC_TEMPLATES_ENABLED
+ template <class R, class A1, class A2, class A3, class... A4> struct weak_result_type<R(A1, A2, A3, A4...)> { typedef R result_type; };
+ template <class R, class A1, class A2, class A3, class... A4> struct weak_result_type<R(&)(A1, A2, A3, A4...)> { typedef R result_type; };
+ template <class R, class A1, class A2, class A3, class... A4> struct weak_result_type<R (*)(A1, A2, A3, A4...)> { typedef R result_type; };
+ template <class R, class C, class A1, class A2, class... A3> struct weak_result_type<R (C::*)(A1, A2, A3...)> { typedef R result_type; };
+ template <class R, class C, class A1, class A2, class... A3> struct weak_result_type<R (C::*)(A1, A2, A3...) const> { typedef R result_type; };
+ template <class R, class C, class A1, class A2, class... A3> struct weak_result_type<R (C::*)(A1, A2, A3...) volatile> { typedef R result_type; };
+ template <class R, class C, class A1, class A2, class... A3> struct weak_result_type<R (C::*)(A1, A2, A3...) const volatile> { typedef R result_type; };
+#endif
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // mem_fn_impl
+ //
+ template <class T>
+ class mem_fn_impl
+#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015 or later
+ // Due to a (seemingly random) internal compiler error on VS2013 we disable eastl::unary_function and
+		// binary_function support for eastl::mem_fn as it's not widely (if at all) used. If you require this support
+ // on VS2013 or below please contact us.
+ : public weak_result_type<T>
+#endif
+ {
+ public:
+ typedef T type;
+
+ private:
+ type func;
+
+ public:
+ EASTL_FORCE_INLINE mem_fn_impl(type _func) : func(_func) {}
+
+#if EASTL_VARIADIC_TEMPLATES_ENABLED
+ template <class... ArgTypes>
+ typename invoke_result<type, ArgTypes...>::type operator()(ArgTypes&&... args) const
+ {
+ return invoke(func, eastl::forward<ArgTypes>(args)...);
+ }
+#else
+ typename invoke_result<type>::type operator()() const { return invoke_impl(func); }
+
+ template <class A0>
+ typename invoke_result0<type, A0>::type operator()(A0& a0) const
+ {
+ return invoke(func, a0);
+ }
+
+ template <class A0, class A1>
+ typename invoke_result1<type, A0, A1>::type operator()(A0& a0, A1& a1) const
+ {
+ return invoke(func, a0, a1);
+ }
+
+ template <class A0, class A1, class A2>
+ typename invoke_result2<type, A0, A1, A2>::type operator()(A0& a0, A1& a1, A2& a2) const
+ {
+ return invoke(func, a0, a1, a2);
+ }
+#endif
+ }; // mem_fn_impl
+
+
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // mem_fn -> mem_fn_impl adapters
+ //
+ template <class R, class T>
+ EASTL_FORCE_INLINE mem_fn_impl<R T::*> mem_fn(R T::*pm)
+ { return mem_fn_impl<R T::*>(pm); }
+
+ template <class R, class T>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)()> mem_fn(R (T::*pm)())
+ { return mem_fn_impl<R (T::*)()>(pm); }
+
+ template <class R, class T, class A0>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0)> mem_fn(R (T::*pm)(A0))
+ { return mem_fn_impl<R (T::*)(A0)>(pm); }
+
+ template <class R, class T, class A0, class A1>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1)> mem_fn(R (T::*pm)(A0, A1))
+ { return mem_fn_impl<R (T::*)(A0, A1)>(pm); }
+
+ template <class R, class T, class A0, class A1, class A2>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1, A2)> mem_fn(R (T::*pm)(A0, A1, A2))
+ { return mem_fn_impl<R (T::*)(A0, A1, A2)>(pm); }
+
+ template <class R, class T>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)() const> mem_fn(R (T::*pm)() const)
+ { return mem_fn_impl<R (T::*)() const>(pm); }
+
+ template <class R, class T, class A0>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0) const> mem_fn(R (T::*pm)(A0) const)
+ { return mem_fn_impl<R (T::*)(A0) const>(pm); }
+
+ template <class R, class T, class A0, class A1>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1) const> mem_fn(R (T::*pm)(A0, A1) const)
+ { return mem_fn_impl<R (T::*)(A0, A1) const>(pm); }
+
+ template <class R, class T, class A0, class A1, class A2>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1, A2) const> mem_fn(R (T::*pm)(A0, A1, A2) const)
+ { return mem_fn_impl<R (T::*)(A0, A1, A2) const>(pm); }
+
+ template <class R, class T>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)() volatile> mem_fn(R (T::*pm)() volatile)
+ { return mem_fn_impl<R (T::*)() volatile>(pm); }
+
+ template <class R, class T, class A0>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0) volatile> mem_fn(R (T::*pm)(A0) volatile)
+ { return mem_fn_impl<R (T::*)(A0) volatile>(pm); }
+
+ template <class R, class T, class A0, class A1>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1) volatile> mem_fn(R (T::*pm)(A0, A1) volatile)
+ { return mem_fn_impl<R (T::*)(A0, A1) volatile>(pm); }
+
+ template <class R, class T, class A0, class A1, class A2>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1, A2) volatile> mem_fn(R (T::*pm)(A0, A1, A2) volatile)
+ { return mem_fn_impl<R (T::*)(A0, A1, A2) volatile>(pm); }
+
+ template <class R, class T>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)() const volatile> mem_fn(R (T::*pm)() const volatile)
+ { return mem_fn_impl<R (T::*)() const volatile>(pm); }
+
+ template <class R, class T, class A0>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0) const volatile> mem_fn(R (T::*pm)(A0) const volatile)
+ { return mem_fn_impl<R (T::*)(A0) const volatile>(pm); }
+
+ template <class R, class T, class A0, class A1>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1) const volatile> mem_fn(R (T::*pm)(A0, A1) const volatile)
+ { return mem_fn_impl<R (T::*)(A0, A1) const volatile>(pm); }
+
+ template <class R, class T, class A0, class A1, class A2>
+ EASTL_FORCE_INLINE mem_fn_impl<R (T::*)(A0, A1, A2) const volatile> mem_fn(R (T::*pm)(A0, A1, A2) const volatile)
+ { return mem_fn_impl<R (T::*)(A0, A1, A2) const volatile>(pm); }
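+
+	// A minimal usage sketch (hedged; 'Widget' is hypothetical): mem_fn wraps a pointer to
+	// member so it can be called like an ordinary function object, which is how
+	// eastl::function stores member-function callables.
+	//
+	//     struct Widget { int size() const { return 42; } };
+	//
+	//     Widget w;
+	//     auto f = eastl::mem_fn(&Widget::size); // Selects the 'R (T::*)() const' overload above.
+	//     int  n = f(w);                         // Invokes w.size(); n == 42.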
+
+} // namespace eastl
+
+#endif // EASTL_INTERNAL_MEM_FN_H
diff --git a/include/EASTL/internal/memory_base.h b/include/EASTL/internal/memory_base.h
new file mode 100644
index 0000000..b1c3490
--- /dev/null
+++ b/include/EASTL/internal/memory_base.h
@@ -0,0 +1,37 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_INTERNAL_MEMORY_BASE_H
+#define EASTL_INTERNAL_MEMORY_BASE_H
+
+#include <EASTL/internal/config.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
+#endif
+
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// This file contains basic functionality found in the standard library 'memory' header that
+// has limited or no dependencies. This allows us to utilize these functions in other
+// EASTL code while avoiding circular dependencies.
+////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace eastl
+{
+ /// addressof
+ ///
+ /// From the C++11 Standard, section 20.6.12.1
+ /// Returns the actual address of the object or function referenced by r, even in the presence of an overloaded operator&.
+ ///
+ template<typename T>
+ T* addressof(T& value) EA_NOEXCEPT
+ {
+ return reinterpret_cast<T*>(&const_cast<char&>(reinterpret_cast<const volatile char&>(value)));
+ }
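+
+	// Usage sketch (hedged; 'Evil' is a hypothetical type with an overloaded operator&):
+	// addressof yields the real address even where '&value' would call the overload.
+	//
+	//     struct Evil { int x; void* operator&() { return nullptr; } };
+	//
+	//     Evil  e;
+	//     Evil* p = eastl::addressof(e); // The actual address of e, not nullptr.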
+
+} // namespace eastl
+
+#endif // EASTL_INTERNAL_MEMORY_BASE_H
+
diff --git a/include/EASTL/internal/move_help.h b/include/EASTL/internal/move_help.h
new file mode 100644
index 0000000..97990df
--- /dev/null
+++ b/include/EASTL/internal/move_help.h
@@ -0,0 +1,162 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_MOVE_HELP_H
+#define EASTL_INTERNAL_MOVE_HELP_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+
+// C++11's rvalue references aren't supported by earlier versions of C++.
+// It turns out that in a number of cases under earlier C++ versions we can
+// write code that uses rvalues similar to lvalues. We have macros below for
+// such cases. For example, eastl::move (same as std::move) can be treated
+// as a no-op under C++03, though with the consequence that move functionality
+// isn't taken advantage of.
+
+
+/// EASTL_MOVE
+/// Acts like eastl::move when possible. Same as C++11 std::move.
+///
+/// EASTL_MOVE_INLINE
+/// Acts like eastl::move but is implemented inline instead of a function call.
+/// This allows code to be faster in debug builds in particular.
+/// Depends on C++ compiler decltype support or a similar extension.
+///
+/// EASTL_FORWARD
+/// Acts like eastl::forward when possible. Same as C++11 std::forward.
+///
+/// EASTL_FORWARD_INLINE
+/// Acts like eastl::forward but is implemented inline instead of a function call.
+/// This allows code to be faster in debug builds in particular.
+///
+#define EASTL_MOVE(x) eastl::move(x)
+#if !defined(EA_COMPILER_NO_DECLTYPE)
+ #define EASTL_MOVE_INLINE(x) static_cast<typename eastl::remove_reference<decltype(x)>::type&&>(x)
+#elif defined(__GNUC__)
+ #define EASTL_MOVE_INLINE(x) static_cast<typename eastl::remove_reference<__typeof__(x)>::type&&>(x)
+#else
+ #define EASTL_MOVE_INLINE(x) eastl::move(x)
+#endif
+
+#define EASTL_FORWARD(T, x) eastl::forward<T>(x)
+#define EASTL_FORWARD_INLINE(T, x) eastl::forward<T>(x) // Need to investigate how to properly make a macro for this. (eastl::is_reference<T>::value ? static_cast<T&&>(static_cast<T&>(x)) : static_cast<T&&>(x))
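+
+// A usage sketch (hedged; 'Container' and 'MakeT' are placeholders): these macros let code be
+// written once and still build on toolchains with weaker C++11 support; under full C++11 they
+// are equivalent to calling eastl::move / eastl::forward directly.
+//
+//     Container a;
+//     Container b(EASTL_MOVE(a));               // Move-construct where supported.
+//
+//     template <typename T, typename Arg>
+//     T MakeT(Arg&& arg)
+//         { return T(EASTL_FORWARD(Arg, arg)); } // Perfect-forward 'arg' into T's constructor.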
+
+
+
+
+/// EASTL_MOVE_RANGE
+/// Acts like the eastl::move algorithm when possible. Same as C++11 std::move.
+/// Not to be confused with the single-argument move: (typename remove_reference<T>::type&& move(T&& x))
+/// http://en.cppreference.com/w/cpp/algorithm/move
+/// http://en.cppreference.com/w/cpp/algorithm/move_backward
+///
+#define EASTL_MOVE_RANGE(first, last, result) eastl::move(first, last, result)
+#define EASTL_MOVE_BACKWARD_RANGE(first, last, resultEnd) eastl::move_backward(first, last, resultEnd)
+
+
+namespace eastl
+{
+ // forward
+ //
+ // forwards the argument to another function exactly as it was passed to the calling function.
+ // Not to be confused with move, this is specifically for echoing templated argument types
+ // to another function. move is specifically about making a type be an rvalue reference (i.e. movable) type.
+ //
+ // Example usage:
+ // template <class T>
+ // void WrapperFunction(T&& arg)
+ // { foo(eastl::forward<T>(arg)); }
+ //
+ // template <class... Args>
+ // void WrapperFunction(Args&&... args)
+ // { foo(eastl::forward<Args>(args)...); }
+ //
+ // See the C++ Standard, section 20.2.3
+ // http://en.cppreference.com/w/cpp/utility/forward
+ //
+ template <typename T>
+ EA_CPP14_CONSTEXPR T&& forward(typename eastl::remove_reference<T>::type& x) EA_NOEXCEPT
+ {
+ return static_cast<T&&>(x);
+ }
+
+
+ template <typename T>
+ EA_CPP14_CONSTEXPR T&& forward(typename eastl::remove_reference<T>::type&& x) EA_NOEXCEPT
+ {
+ static_assert(!is_lvalue_reference<T>::value, "forward T isn't lvalue reference");
+ return static_cast<T&&>(x);
+ }
+
+
+ // move
+ //
+ // move obtains an rvalue reference to its argument and converts it to an xvalue.
+ // Returns, by definition: static_cast<typename remove_reference<T>::type&&>(t).
+ // The primary use of this is to pass a move'd type to a function which takes T&&,
+ // and thus select that function instead of (e.g.) a function which takes T or T&.
+ // See the C++ Standard, section 20.2.3
+ // http://en.cppreference.com/w/cpp/utility/move
+ //
+ template <typename T>
+ EA_CPP14_CONSTEXPR typename eastl::remove_reference<T>::type&&
+ move(T&& x) EA_NOEXCEPT
+ {
+ return static_cast<typename eastl::remove_reference<T>::type&&>(x);
+ }
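+
+	// Usage sketch (hedged; 'Widget' is hypothetical): move is how a named object opts into
+	// the T&& overload, e.g. a move constructor.
+	//
+	//     Widget a;
+	//     Widget b(eastl::move(a)); // Selects Widget(Widget&&) if present; 'a' is left in a valid but unspecified state.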
+
+
+ // move_if_noexcept
+ //
+	// Returns T&& if move-constructing T throws no exceptions (or if T has no accessible
+	// copy constructor to fall back on); otherwise returns const T&.
+	// The purpose of this is to automatically use copy construction instead of move
+	// construction when the move might throw an exception.
+ // See the C++ Standard, section 20.2.3
+ // http://en.cppreference.com/w/cpp/utility/move_if_noexcept
+ //
+ #if EASTL_EXCEPTIONS_ENABLED
+ template <typename T>
+ EA_CPP14_CONSTEXPR typename eastl::conditional<!eastl::is_nothrow_move_constructible<T>::value &&
+ eastl::is_copy_constructible<T>::value, const T&, T&&>::type
+ move_if_noexcept(T& x) EA_NOEXCEPT
+ {
+ return eastl::move(x);
+ }
+ #else
+ template <typename T>
+ EA_CPP14_CONSTEXPR T&&
+ move_if_noexcept(T& x) EA_NOEXCEPT
+ {
+ return eastl::move(x);
+ }
+ #endif
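+
+	// Usage sketch (hedged; 'pDest', 'pSource', and element type 'T' are placeholders):
+	// containers typically relocate elements via move_if_noexcept so that a throwing move
+	// constructor doesn't compromise the strong exception guarantee.
+	//
+	//     new(pDest) T(eastl::move_if_noexcept(*pSource)); // Moves if T's move can't throw (or T isn't copyable); otherwise copies.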
+
+} // namespace eastl
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/include/EASTL/internal/pair_fwd_decls.h b/include/EASTL/internal/pair_fwd_decls.h
new file mode 100644
index 0000000..a716482
--- /dev/null
+++ b/include/EASTL/internal/pair_fwd_decls.h
@@ -0,0 +1,16 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_PAIR_FWD_DECLS_H
+#define EASTL_PAIR_FWD_DECLS_H
+
+#include <EASTL/internal/config.h>
+
+namespace eastl
+{
+ template <typename T1, typename T2>
+ struct pair;
+}
+
+#endif // EASTL_PAIR_FWD_DECLS_H
diff --git a/include/EASTL/internal/piecewise_construct_t.h b/include/EASTL/internal/piecewise_construct_t.h
new file mode 100644
index 0000000..d853f0e
--- /dev/null
+++ b/include/EASTL/internal/piecewise_construct_t.h
@@ -0,0 +1,46 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_PIECEWISE_CONSTRUCT_T_H
+#define EASTL_INTERNAL_PIECEWISE_CONSTRUCT_T_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////////////
+ /// piecewise_construct_t
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/piecewise_construct_t
+ ///
+ struct piecewise_construct_t
+ {
+ explicit piecewise_construct_t() = default;
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// piecewise_construct
+ ///
+ /// A tag type used to disambiguate between function overloads that take two tuple arguments.
+ ///
+ /// http://en.cppreference.com/w/cpp/utility/piecewise_construct
+ ///
+ EA_CONSTEXPR piecewise_construct_t piecewise_construct = eastl::piecewise_construct_t();
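+
+	// Usage sketch (hedged; assumes eastl::pair and eastl::make_tuple provide the standard
+	// piecewise-construction support, as their std counterparts do): the tag routes each
+	// tuple's elements to the corresponding pair member's constructor.
+	//
+	//     eastl::pair<eastl::string, eastl::vector<int>> p(
+	//         eastl::piecewise_construct,
+	//         eastl::make_tuple("name"),
+	//         eastl::make_tuple(16, 0)); // Constructs string("name") and vector<int>(16, 0) in place.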
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
diff --git a/include/EASTL/internal/red_black_tree.h b/include/EASTL/internal/red_black_tree.h
new file mode 100644
index 0000000..7448bd4
--- /dev/null
+++ b/include/EASTL/internal/red_black_tree.h
@@ -0,0 +1,2400 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_RED_BLACK_TREE_H
+#define EASTL_RED_BLACK_TREE_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/allocator.h>
+#include <EASTL/iterator.h>
+#include <EASTL/utility.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/initializer_list.h>
+#include <EASTL/tuple.h>
+
+EA_DISABLE_ALL_VC_WARNINGS()
+#include <new>
+#include <stddef.h>
+EA_RESTORE_ALL_VC_WARNINGS()
+
+
+// 4512 - 'class' : assignment operator could not be generated
+// 4530 - C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+// 4571 - catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught.
+EA_DISABLE_VC_WARNING(4512 4530 4571);
+
+
+namespace eastl
+{
+
+ /// EASTL_RBTREE_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_RBTREE_DEFAULT_NAME
+ #define EASTL_RBTREE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " rbtree" // Unless the user overrides something, this is "EASTL rbtree".
+ #endif
+
+
+ /// EASTL_RBTREE_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_RBTREE_DEFAULT_ALLOCATOR
+ #define EASTL_RBTREE_DEFAULT_ALLOCATOR allocator_type(EASTL_RBTREE_DEFAULT_NAME)
+ #endif
+
+
+ /// EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ ///
+ #ifndef EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ #define EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR 0
+ #endif
+
+
+ /// RBTreeColor
+ ///
+ enum RBTreeColor
+ {
+ kRBTreeColorRed,
+ kRBTreeColorBlack
+ };
+
+
+
+	/// RBTreeSide
+ ///
+ enum RBTreeSide
+ {
+ kRBTreeSideLeft,
+ kRBTreeSideRight
+ };
+
+
+
+ /// rbtree_node_base
+ ///
+ /// We define a rbtree_node_base separately from rbtree_node (below), because it
+ /// allows us to have non-templated operations, and it makes it so that the
+ /// rbtree anchor node doesn't carry a T with it, which would waste space and
+ /// possibly lead to surprising the user due to extra Ts existing that the user
+ /// didn't explicitly create. The downside to all of this is that it makes debug
+ /// viewing of an rbtree harder, given that the node pointers are of type
+ /// rbtree_node_base and not rbtree_node.
+ ///
+ struct rbtree_node_base
+ {
+ typedef rbtree_node_base this_type;
+
+ public:
+ this_type* mpNodeRight; // Declared first because it is used most often.
+ this_type* mpNodeLeft;
+ this_type* mpNodeParent;
+ char mColor; // We only need one bit here, would be nice if we could stuff that bit somewhere else.
+ };
+
+
+ /// rbtree_node
+ ///
+ template <typename Value>
+ struct rbtree_node : public rbtree_node_base
+ {
+		Value mValue; // For set and multiset, this is the user's value; for map and multimap, this is a pair of key/value.
+
+		// This type is never constructed, so to avoid an MSVC warning we "delete" the copy constructor.
+ //
+ // Potentially we could provide a constructor that would satisfy the compiler and change the code to use this constructor
+ // instead of constructing mValue in place within an unconstructed rbtree_node.
+ #if defined(_MSC_VER)
+ rbtree_node(const rbtree_node&) = delete;
+ #endif
+ };
+
+
+
+
+ // rbtree_node_base functions
+ //
+ // These are the fundamental functions that we use to maintain the
+ // tree. The bulk of the work of the tree maintenance is done in
+ // these functions.
+ //
+ EASTL_API rbtree_node_base* RBTreeIncrement (const rbtree_node_base* pNode);
+ EASTL_API rbtree_node_base* RBTreeDecrement (const rbtree_node_base* pNode);
+ EASTL_API rbtree_node_base* RBTreeGetMinChild (const rbtree_node_base* pNode);
+ EASTL_API rbtree_node_base* RBTreeGetMaxChild (const rbtree_node_base* pNode);
+ EASTL_API size_t RBTreeGetBlackCount(const rbtree_node_base* pNodeTop,
+ const rbtree_node_base* pNodeBottom);
+ EASTL_API void RBTreeInsert ( rbtree_node_base* pNode,
+ rbtree_node_base* pNodeParent,
+ rbtree_node_base* pNodeAnchor,
+ RBTreeSide insertionSide);
+ EASTL_API void RBTreeErase ( rbtree_node_base* pNode,
+ rbtree_node_base* pNodeAnchor);
+
+
+
+
+
+
+
+ /// rbtree_iterator
+ ///
+ template <typename T, typename Pointer, typename Reference>
+ struct rbtree_iterator
+ {
+ typedef rbtree_iterator<T, Pointer, Reference> this_type;
+ typedef rbtree_iterator<T, T*, T&> iterator;
+ typedef rbtree_iterator<T, const T*, const T&> const_iterator;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef rbtree_node_base base_node_type;
+ typedef rbtree_node<T> node_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+ typedef EASTL_ITC_NS::bidirectional_iterator_tag iterator_category;
+
+ public:
+ node_type* mpNode;
+
+ public:
+ rbtree_iterator();
+ explicit rbtree_iterator(const node_type* pNode);
+ rbtree_iterator(const iterator& x);
+
+ reference operator*() const;
+ pointer operator->() const;
+
+ rbtree_iterator& operator++();
+ rbtree_iterator operator++(int);
+
+ rbtree_iterator& operator--();
+ rbtree_iterator operator--(int);
+
+ }; // rbtree_iterator
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // rb_base_compare_ebo
+ //
+ // Utilizes the "empty base-class optimization" to reduce the size of the rbtree
+ // when its Compare template argument is an empty class.
+ ///////////////////////////////////////////////////////////////////////////////
+
+ template <typename Compare, bool /*isEmpty*/ = is_empty<Compare>::value>
+ struct rb_base_compare_ebo
+ {
+ protected:
+ rb_base_compare_ebo() : mCompare() {}
+ rb_base_compare_ebo(const Compare& compare) : mCompare(compare) {}
+
+ Compare& get_compare() { return mCompare; }
+ const Compare& get_compare() const { return mCompare; }
+
+ template <typename T>
+ bool compare(const T& lhs, const T& rhs)
+ {
+ return mCompare(lhs, rhs);
+ }
+
+ template <typename T>
+ bool compare(const T& lhs, const T& rhs) const
+ {
+ return mCompare(lhs, rhs);
+ }
+
+ private:
+ Compare mCompare;
+ };
+
+ template <typename Compare>
+ struct rb_base_compare_ebo<Compare, true> : private Compare
+ {
+ protected:
+ rb_base_compare_ebo() {}
+ rb_base_compare_ebo(const Compare& compare) : Compare(compare) {}
+
+ Compare& get_compare() { return *this; }
+ const Compare& get_compare() const { return *this; }
+
+ template <typename T>
+ bool compare(const T& lhs, const T& rhs)
+ {
+ return Compare::operator()(lhs, rhs);
+ }
+
+ template <typename T>
+ bool compare(const T& lhs, const T& rhs) const
+ {
+ return Compare::operator()(lhs, rhs);
+ }
+ };
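+
+	// Illustrative note (hedged): with a stateless comparator such as eastl::less<int>, the
+	// 'true' specialization above inherits privately from Compare, so the comparator adds no
+	// storage to the tree (e.g. one would expect eastl::is_empty<rb_base_compare_ebo<eastl::less<int> > >::value
+	// to be true). A stateful comparator falls back to the primary template and is stored as a member.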
+
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // rb_base
+ //
+ // This class allows us to use a generic rbtree as the basis of map, multimap,
+ // set, and multiset transparently. The vital template parameters for this are
+ // the ExtractKey and the bUniqueKeys parameters.
+ //
+ // If the rbtree has a value type of the form pair<T1, T2> (i.e. it is a map or
+ // multimap and not a set or multiset) and a key extraction policy that returns
+ // the first part of the pair, the rbtree gets a mapped_type typedef.
+ // If it satisfies those criteria and also has unique keys, then it also gets an
+	// operator[] (which only map has; set, multimap, and multiset don't have it).
+ //
+ ///////////////////////////////////////////////////////////////////////////////
+
+
+
+ /// rb_base
+ /// This specialization is used for 'set'. In this case, Key and Value
+ /// will be the same as each other and ExtractKey will be eastl::use_self.
+ ///
+ template <typename Key, typename Value, typename Compare, typename ExtractKey, bool bUniqueKeys, typename RBTree>
+ struct rb_base : public rb_base_compare_ebo<Compare>
+ {
+ typedef ExtractKey extract_key;
+
+ protected:
+ using rb_base_compare_ebo<Compare>::compare;
+ using rb_base_compare_ebo<Compare>::get_compare;
+
+ public:
+ rb_base() {}
+ rb_base(const Compare& compare) : rb_base_compare_ebo<Compare>(compare) {}
+ };
+
+
+ /// rb_base
+ /// This class is used for 'multiset'.
+ /// In this case, Key and Value will be the same as each
+ /// other and ExtractKey will be eastl::use_self.
+ ///
+ template <typename Key, typename Value, typename Compare, typename ExtractKey, typename RBTree>
+ struct rb_base<Key, Value, Compare, ExtractKey, false, RBTree> : public rb_base_compare_ebo<Compare>
+ {
+ typedef ExtractKey extract_key;
+
+ protected:
+ using rb_base_compare_ebo<Compare>::compare;
+ using rb_base_compare_ebo<Compare>::get_compare;
+
+ public:
+ rb_base() {}
+ rb_base(const Compare& compare) : rb_base_compare_ebo<Compare>(compare) {}
+ };
+
+
+ /// rb_base
+ /// This specialization is used for 'map'.
+ ///
+ template <typename Key, typename Pair, typename Compare, typename RBTree>
+ struct rb_base<Key, Pair, Compare, eastl::use_first<Pair>, true, RBTree> : public rb_base_compare_ebo<Compare>
+ {
+ typedef eastl::use_first<Pair> extract_key;
+
+ using rb_base_compare_ebo<Compare>::compare;
+ using rb_base_compare_ebo<Compare>::get_compare;
+
+ public:
+ rb_base() {}
+ rb_base(const Compare& compare) : rb_base_compare_ebo<Compare>(compare) {}
+ };
+
+
+ /// rb_base
+ /// This specialization is used for 'multimap'.
+ ///
+ template <typename Key, typename Pair, typename Compare, typename RBTree>
+ struct rb_base<Key, Pair, Compare, eastl::use_first<Pair>, false, RBTree> : public rb_base_compare_ebo<Compare>
+ {
+ typedef eastl::use_first<Pair> extract_key;
+
+ using rb_base_compare_ebo<Compare>::compare;
+ using rb_base_compare_ebo<Compare>::get_compare;
+
+ public:
+ rb_base() {}
+ rb_base(const Compare& compare) : rb_base_compare_ebo<Compare>(compare) {}
+ };
+
+
+ /// rbtree
+ ///
+ /// rbtree is the red-black tree basis for the map, multimap, set, and multiset
+ /// containers. Just about all the work of those containers is done here, and
+ /// they are merely a shell which sets template policies that govern the code
+ /// generation for this rbtree.
+ ///
+ /// This rbtree implementation is pretty much the same as all other modern
+ /// rbtree implementations, as the topic is well known and researched. We may
+ /// choose to implement a "relaxed balancing" option at some point in the
+ /// future if it is deemed worthwhile. Most rbtree implementations don't do this.
+ ///
+ /// The primary rbtree member variable is mAnchor, which is a node_type and
+ /// acts as the end node. However, like any other node, it has mpNodeLeft,
+ /// mpNodeRight, and mpNodeParent members. We do the conventional trick of
+ /// assigning begin() (left-most rbtree node) to mpNodeLeft, assigning
+ /// 'end() - 1' (a.k.a. rbegin()) to mpNodeRight, and assigning the tree root
+ /// node to mpNodeParent.
+ ///
+ /// Compare (functor): This is a comparison class which defaults to 'less'.
+ /// It is a common STL thing which takes two arguments and returns true if
+ /// the first is less than the second.
+ ///
+ /// ExtractKey (functor): This is a class which gets the key from a stored
+	/// node. With map and multimap, the node is a pair, whereas with set and multiset
+ /// the node is just the value. ExtractKey will be either eastl::use_first (map and multimap)
+ /// or eastl::use_self (set and multiset).
+ ///
+ /// bMutableIterators (bool): true if rbtree::iterator is a mutable
+ /// iterator, false if iterator and const_iterator are both const iterators.
+ /// It will be true for map and multimap and false for set and multiset.
+ ///
+ /// bUniqueKeys (bool): true if the keys are to be unique, and false if there
+ /// can be multiple instances of a given key. It will be true for set and map
+ /// and false for multiset and multimap.
+ ///
+ /// To consider: Add an option for relaxed tree balancing. This could result
+ /// in performance improvements but would require a more complicated implementation.
+ ///
+ ///////////////////////////////////////////////////////////////////////
+ /// find_as
+ /// In order to support the ability to have a tree of strings but
+ /// be able to do efficiently lookups via char pointers (i.e. so they
+ /// aren't converted to string objects), we provide the find_as
+ /// function. This function allows you to do a find with a key of a
+ /// type other than the tree's key type. See the find_as function
+ /// for more documentation on this.
+ ///
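+	///////////////////////////////////////////////////////////////////////
+	/// Illustrative instantiations (a hedged sketch; the authoritative typedefs live in the
+	/// map/set container headers, and 'Widget' / EASTLAllocatorType below are merely
+	/// placeholders for the mapped type and allocator):
+	///
+	///     // set<int>-like: Key == Value, ExtractKey == use_self, const iterators, unique keys.
+	///     typedef rbtree<int, int, eastl::less<int>, EASTLAllocatorType,
+	///                    eastl::use_self<int>, false, true> IntSetTree;
+	///
+	///     // map<int, Widget>-like: Value is a pair, ExtractKey == use_first, mutable iterators, unique keys.
+	///     typedef rbtree<int, eastl::pair<const int, Widget>, eastl::less<int>, EASTLAllocatorType,
+	///                    eastl::use_first<eastl::pair<const int, Widget> >, true, true> IntWidgetMapTree;
+	///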
+ template <typename Key, typename Value, typename Compare, typename Allocator,
+ typename ExtractKey, bool bMutableIterators, bool bUniqueKeys>
+ class rbtree
+ : public rb_base<Key, Value, Compare, ExtractKey, bUniqueKeys,
+ rbtree<Key, Value, Compare, Allocator, ExtractKey, bMutableIterators, bUniqueKeys> >
+ {
+ public:
+ typedef ptrdiff_t difference_type;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t.
+ typedef Key key_type;
+ typedef Value value_type;
+ typedef rbtree_node<value_type> node_type;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+
+ typedef typename type_select<bMutableIterators,
+ rbtree_iterator<value_type, value_type*, value_type&>,
+ rbtree_iterator<value_type, const value_type*, const value_type&> >::type iterator;
+ typedef rbtree_iterator<value_type, const value_type*, const value_type&> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+
+ typedef Allocator allocator_type;
+ typedef Compare key_compare;
+		typedef typename type_select<bUniqueKeys, eastl::pair<iterator, bool>, iterator>::type insert_return_type; // map/set::insert return a pair, multimap/multiset::insert return an iterator.
+ typedef rbtree<Key, Value, Compare, Allocator,
+ ExtractKey, bMutableIterators, bUniqueKeys> this_type;
+ typedef rb_base<Key, Value, Compare, ExtractKey, bUniqueKeys, this_type> base_type;
+ typedef integral_constant<bool, bUniqueKeys> has_unique_keys_type;
+ typedef typename base_type::extract_key extract_key;
+
+ protected:
+ using base_type::compare;
+ using base_type::get_compare;
+
+ public:
+ rbtree_node_base mAnchor; /// This node acts as end() and its mpLeft points to begin(), and mpRight points to rbegin() (the last node on the right).
+ size_type mnSize; /// Stores the count of nodes in the tree (not counting the anchor node).
+ allocator_type mAllocator; // To do: Use base class optimization to make this go away.
+
+ public:
+ // ctor/dtor
+ rbtree();
+ rbtree(const allocator_type& allocator);
+ rbtree(const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR);
+ rbtree(const this_type& x);
+ rbtree(this_type&& x);
+ rbtree(this_type&& x, const allocator_type& allocator);
+
+ template <typename InputIterator>
+ rbtree(InputIterator first, InputIterator last, const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR);
+
+ ~rbtree();
+
+ public:
+ // properties
+ const allocator_type& get_allocator() const EA_NOEXCEPT;
+ allocator_type& get_allocator() EA_NOEXCEPT;
+ void set_allocator(const allocator_type& allocator);
+
+ const key_compare& key_comp() const { return get_compare(); }
+ key_compare& key_comp() { return get_compare(); }
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(std::initializer_list<value_type> ilist);
+ this_type& operator=(this_type&& x);
+
+ void swap(this_type& x);
+
+ public:
+ // iterators
+ iterator begin() EA_NOEXCEPT;
+ const_iterator begin() const EA_NOEXCEPT;
+ const_iterator cbegin() const EA_NOEXCEPT;
+
+ iterator end() EA_NOEXCEPT;
+ const_iterator end() const EA_NOEXCEPT;
+ const_iterator cend() const EA_NOEXCEPT;
+
+ reverse_iterator rbegin() EA_NOEXCEPT;
+ const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ const_reverse_iterator crbegin() const EA_NOEXCEPT;
+
+ reverse_iterator rend() EA_NOEXCEPT;
+ const_reverse_iterator rend() const EA_NOEXCEPT;
+ const_reverse_iterator crend() const EA_NOEXCEPT;
+
+ public:
+ bool empty() const EA_NOEXCEPT;
+ size_type size() const EA_NOEXCEPT;
+
+ template <class... Args>
+ insert_return_type emplace(Args&&... args);
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator position, Args&&... args);
+
+ template <class... Args> eastl::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args);
+ template <class... Args> eastl::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args);
+ template <class... Args> iterator try_emplace(const_iterator position, const key_type& k, Args&&... args);
+ template <class... Args> iterator try_emplace(const_iterator position, key_type&& k, Args&&... args);
+
+ // Standard conversion overload to avoid the overhead of mismatched 'pair<const Key, Value>' types.
+ template <class P, class = typename eastl::enable_if<eastl::is_constructible<value_type, P&&>::value>::type>
+ insert_return_type insert(P&& otherValue);
+
+ // Currently limited to value_type instead of P because it collides with insert(InputIterator, InputIterator).
+ // To allow this to work with templated P we need to implement a compile-time specialization for the
+ // case that P&& is const_iterator and have that specialization handle insert(InputIterator, InputIterator)
+		// instead of this hint overload. Curiously, neither libstdc++ nor libc++
+ // implement this function either, which suggests they ran into the same problem I did here
+ // and haven't yet resolved it (at least as of March 2014, GCC 4.8.1).
+ iterator insert(const_iterator hint, value_type&& value);
+
+ /// map::insert and set::insert return a pair, while multimap::insert and
+ /// multiset::insert return an iterator.
+ insert_return_type insert(const value_type& value);
+
+ // C++ standard: inserts value if and only if there is no element with
+ // key equivalent to the key of t in containers with unique keys; always
+ // inserts value in containers with equivalent keys. Always returns the
+ // iterator pointing to the element with key equivalent to the key of value.
+ // iterator position is a hint pointing to where the insert should start
+ // to search. However, there is a potential defect/improvement report on this behaviour:
+ // LWG issue #233 (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1780.html)
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+ iterator insert(const_iterator position, const value_type& value);
+
+ void insert(std::initializer_list<value_type> ilist);
+
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last);
+
+ // TODO(rparolin):
+ // insert_return_type insert(node_type&& nh);
+ // iterator insert(const_iterator hint, node_type&& nh);
+
+ template <class M> pair<iterator, bool> insert_or_assign(const key_type& k, M&& obj);
+ template <class M> pair<iterator, bool> insert_or_assign(key_type&& k, M&& obj);
+ template <class M> iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj);
+ template <class M> iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj);
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ reverse_iterator erase(const_reverse_iterator position);
+ reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last);
+
+ // For some reason, multiple STL versions make a specialization
+ // for erasing an array of key_types. I'm pretty sure we don't
+ // need this, but just to be safe we will follow suit.
+ // The implementation is trivial. Returns void because the values
+ // could well be randomly distributed throughout the tree and thus
+ // a return value would be nearly meaningless.
+ void erase(const key_type* first, const key_type* last);
+
+ void clear();
+ void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
+
+ iterator find(const key_type& key);
+ const_iterator find(const key_type& key) const;
+
+ /// Implements a find whereby the user supplies a comparison of a different type
+ /// than the tree's value_type. A useful case of this is one whereby you have
+ /// a container of string objects but want to do searches via passing in char pointers.
+ /// The problem is that without this kind of find, you need to do the expensive operation
+ /// of converting the char pointer to a string so it can be used as the argument to the
+ /// find function.
+ ///
+ /// Example usage (note that the compare uses string as first type and char* as second):
+ /// set<string> strings;
+ /// strings.find_as("hello", less_2<string, const char*>());
+ ///
+ template <typename U, typename Compare2> iterator find_as(const U& u, Compare2 compare2);
+ template <typename U, typename Compare2> const_iterator find_as(const U& u, Compare2 compare2) const;
+
+ iterator lower_bound(const key_type& key);
+ const_iterator lower_bound(const key_type& key) const;
+
+ iterator upper_bound(const key_type& key);
+ const_iterator upper_bound(const key_type& key) const;
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ protected:
+ node_type* DoAllocateNode();
+ void DoFreeNode(node_type* pNode);
+
+ node_type* DoCreateNodeFromKey(const key_type& key);
+
+ template<class... Args>
+ node_type* DoCreateNode(Args&&... args);
+ node_type* DoCreateNode(const value_type& value);
+ node_type* DoCreateNode(value_type&& value);
+ node_type* DoCreateNode(const node_type* pNodeSource, node_type* pNodeParent);
+
+ node_type* DoCopySubtree(const node_type* pNodeSource, node_type* pNodeDest);
+ void DoNukeSubtree(node_type* pNode);
+
+ template <class... Args>
+ eastl::pair<iterator, bool> DoInsertValue(true_type, Args&&... args);
+
+ template <class... Args>
+ iterator DoInsertValue(false_type, Args&&... args);
+
+ eastl::pair<iterator, bool> DoInsertValue(true_type, value_type&& value);
+ iterator DoInsertValue(false_type, value_type&& value);
+
+ template <class... Args>
+ iterator DoInsertValueImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key, Args&&... args);
+ iterator DoInsertValueImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key, node_type* pNodeNew);
+
+ eastl::pair<iterator, bool> DoInsertKey(true_type, const key_type& key);
+ iterator DoInsertKey(false_type, const key_type& key);
+
+ template <class... Args>
+ iterator DoInsertValueHint(true_type, const_iterator position, Args&&... args);
+
+ template <class... Args>
+ iterator DoInsertValueHint(false_type, const_iterator position, Args&&... args);
+
+ iterator DoInsertValueHint(true_type, const_iterator position, value_type&& value);
+ iterator DoInsertValueHint(false_type, const_iterator position, value_type&& value);
+
+ iterator DoInsertKey(true_type, const_iterator position, const key_type& key); // By design we return iterator and not a pair.
+ iterator DoInsertKey(false_type, const_iterator position, const key_type& key);
+ iterator DoInsertKeyImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key);
+
+ node_type* DoGetKeyInsertionPositionUniqueKeys(bool& canInsert, const key_type& key);
+ node_type* DoGetKeyInsertionPositionNonuniqueKeys(const key_type& key);
+
+ node_type* DoGetKeyInsertionPositionUniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key);
+ node_type* DoGetKeyInsertionPositionNonuniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key);
+
+ }; // rbtree
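+	// rbtree is an implementation detail: it is normally reached through eastl::map, eastl::multimap,
+	// eastl::set and eastl::multiset, which forward to the members declared above. A brief, hedged
+	// sketch of that indirect use (contents are illustrative only):
+	//     eastl::map<int, int> m;
+	//     m.insert(eastl::make_pair(1, 10));              // -> rbtree::insert -> DoInsertValue(true_type, ...)
+	//     eastl::map<int, int>::iterator it = m.find(1);  // -> rbtree::find
+	//     m.erase(it);                                    // -> rbtree::erase(const_iterator)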
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rbtree_node_base functions
+ ///////////////////////////////////////////////////////////////////////
+
+ EASTL_API inline rbtree_node_base* RBTreeGetMinChild(const rbtree_node_base* pNodeBase)
+ {
+ while(pNodeBase->mpNodeLeft)
+ pNodeBase = pNodeBase->mpNodeLeft;
+ return const_cast<rbtree_node_base*>(pNodeBase);
+ }
+
+ EASTL_API inline rbtree_node_base* RBTreeGetMaxChild(const rbtree_node_base* pNodeBase)
+ {
+ while(pNodeBase->mpNodeRight)
+ pNodeBase = pNodeBase->mpNodeRight;
+ return const_cast<rbtree_node_base*>(pNodeBase);
+ }
+
+	// The rest of the functions are non-trivial and are found in
+	// the .cpp file that corresponds to this header file.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rbtree_iterator functions
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Pointer, typename Reference>
+ rbtree_iterator<T, Pointer, Reference>::rbtree_iterator()
+ : mpNode(NULL) { }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ rbtree_iterator<T, Pointer, Reference>::rbtree_iterator(const node_type* pNode)
+ : mpNode(static_cast<node_type*>(const_cast<node_type*>(pNode))) { }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ rbtree_iterator<T, Pointer, Reference>::rbtree_iterator(const iterator& x)
+ : mpNode(x.mpNode) { }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::reference
+ rbtree_iterator<T, Pointer, Reference>::operator*() const
+ { return mpNode->mValue; }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::pointer
+ rbtree_iterator<T, Pointer, Reference>::operator->() const
+ { return &mpNode->mValue; }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::this_type&
+ rbtree_iterator<T, Pointer, Reference>::operator++()
+ {
+ mpNode = static_cast<node_type*>(RBTreeIncrement(mpNode));
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::this_type
+ rbtree_iterator<T, Pointer, Reference>::operator++(int)
+ {
+ this_type temp(*this);
+ mpNode = static_cast<node_type*>(RBTreeIncrement(mpNode));
+ return temp;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::this_type&
+ rbtree_iterator<T, Pointer, Reference>::operator--()
+ {
+ mpNode = static_cast<node_type*>(RBTreeDecrement(mpNode));
+ return *this;
+ }
+
+
+ template <typename T, typename Pointer, typename Reference>
+ typename rbtree_iterator<T, Pointer, Reference>::this_type
+ rbtree_iterator<T, Pointer, Reference>::operator--(int)
+ {
+ this_type temp(*this);
+ mpNode = static_cast<node_type*>(RBTreeDecrement(mpNode));
+ return temp;
+ }
+
+
+ // The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
+	// Thus we provide additional template parameters here to support this. The defect report does not
+ // require us to support comparisons between reverse_iterators and const_reverse_iterators.
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator==(const rbtree_iterator<T, PointerA, ReferenceA>& a,
+ const rbtree_iterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode == b.mpNode;
+ }
+
+
+ template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+ inline bool operator!=(const rbtree_iterator<T, PointerA, ReferenceA>& a,
+ const rbtree_iterator<T, PointerB, ReferenceB>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+ // We provide a version of operator!= for the case where the iterators are of the
+ // same type. This helps prevent ambiguity errors in the presence of rel_ops.
+ template <typename T, typename Pointer, typename Reference>
+ inline bool operator!=(const rbtree_iterator<T, Pointer, Reference>& a,
+ const rbtree_iterator<T, Pointer, Reference>& b)
+ {
+ return a.mpNode != b.mpNode;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rbtree functions
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree()
+ : mAnchor(),
+ mnSize(0),
+ mAllocator(EASTL_RBTREE_DEFAULT_NAME)
+ {
+ reset_lose_memory();
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(const allocator_type& allocator)
+ : mAnchor(),
+ mnSize(0),
+ mAllocator(allocator)
+ {
+ reset_lose_memory();
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(const C& compare, const allocator_type& allocator)
+ : base_type(compare),
+ mAnchor(),
+ mnSize(0),
+ mAllocator(allocator)
+ {
+ reset_lose_memory();
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(const this_type& x)
+ : base_type(x.get_compare()),
+ mAnchor(),
+ mnSize(0),
+ mAllocator(x.mAllocator)
+ {
+ reset_lose_memory();
+
+ if(x.mAnchor.mpNodeParent) // mAnchor.mpNodeParent is the rb_tree root node.
+ {
+ mAnchor.mpNodeParent = DoCopySubtree((const node_type*)x.mAnchor.mpNodeParent, (node_type*)&mAnchor);
+ mAnchor.mpNodeRight = RBTreeGetMaxChild(mAnchor.mpNodeParent);
+ mAnchor.mpNodeLeft = RBTreeGetMinChild(mAnchor.mpNodeParent);
+ mnSize = x.mnSize;
+ }
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(this_type&& x)
+ : base_type(x.get_compare()),
+ mAnchor(),
+ mnSize(0),
+ mAllocator(x.mAllocator)
+ {
+ reset_lose_memory();
+ swap(x);
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(this_type&& x, const allocator_type& allocator)
+ : base_type(x.get_compare()),
+ mAnchor(),
+ mnSize(0),
+ mAllocator(allocator)
+ {
+ reset_lose_memory();
+ swap(x); // swap will directly or indirectly handle the possibility that mAllocator != x.mAllocator.
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <typename InputIterator>
+ inline rbtree<K, V, C, A, E, bM, bU>::rbtree(InputIterator first, InputIterator last, const C& compare, const allocator_type& allocator)
+ : base_type(compare),
+ mAnchor(),
+ mnSize(0),
+ mAllocator(allocator)
+ {
+ reset_lose_memory();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ for(; first != last; ++first)
+ insert(*first);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ clear();
+ throw;
+ }
+ #endif
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline rbtree<K, V, C, A, E, bM, bU>::~rbtree()
+ {
+ // Erase the entire tree. DoNukeSubtree is not a
+ // conventional erase function, as it does no rebalancing.
+ DoNukeSubtree((node_type*)mAnchor.mpNodeParent);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline const typename rbtree<K, V, C, A, E, bM, bU>::allocator_type&
+ rbtree<K, V, C, A, E, bM, bU>::get_allocator() const EA_NOEXCEPT
+ {
+ return mAllocator;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::allocator_type&
+ rbtree<K, V, C, A, E, bM, bU>::get_allocator() EA_NOEXCEPT
+ {
+ return mAllocator;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::set_allocator(const allocator_type& allocator)
+ {
+ mAllocator = allocator;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::size_type
+ rbtree<K, V, C, A, E, bM, bU>::size() const EA_NOEXCEPT
+ { return mnSize; }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline bool rbtree<K, V, C, A, E, bM, bU>::empty() const EA_NOEXCEPT
+ { return (mnSize == 0); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::begin() EA_NOEXCEPT
+ { return iterator(static_cast<node_type*>(mAnchor.mpNodeLeft)); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::begin() const EA_NOEXCEPT
+ { return const_iterator(static_cast<node_type*>(const_cast<rbtree_node_base*>(mAnchor.mpNodeLeft))); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::cbegin() const EA_NOEXCEPT
+ { return const_iterator(static_cast<node_type*>(const_cast<rbtree_node_base*>(mAnchor.mpNodeLeft))); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::end() EA_NOEXCEPT
+ { return iterator(static_cast<node_type*>(&mAnchor)); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::end() const EA_NOEXCEPT
+ { return const_iterator(static_cast<node_type*>(const_cast<rbtree_node_base*>(&mAnchor))); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::cend() const EA_NOEXCEPT
+ { return const_iterator(static_cast<node_type*>(const_cast<rbtree_node_base*>(&mAnchor))); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::rbegin() EA_NOEXCEPT
+ { return reverse_iterator(end()); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::rbegin() const EA_NOEXCEPT
+ { return const_reverse_iterator(end()); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::crbegin() const EA_NOEXCEPT
+ { return const_reverse_iterator(end()); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::rend() EA_NOEXCEPT
+ { return reverse_iterator(begin()); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::rend() const EA_NOEXCEPT
+ { return const_reverse_iterator(begin()); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::crend() const EA_NOEXCEPT
+ { return const_reverse_iterator(begin()); }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::this_type&
+ rbtree<K, V, C, A, E, bM, bU>::operator=(const this_type& x)
+ {
+ if(this != &x)
+ {
+ clear();
+
+ #if EASTL_ALLOCATOR_COPY_ENABLED
+ mAllocator = x.mAllocator;
+ #endif
+
+ get_compare() = x.get_compare();
+
+ if(x.mAnchor.mpNodeParent) // mAnchor.mpNodeParent is the rb_tree root node.
+ {
+ mAnchor.mpNodeParent = DoCopySubtree((const node_type*)x.mAnchor.mpNodeParent, (node_type*)&mAnchor);
+ mAnchor.mpNodeRight = RBTreeGetMaxChild(mAnchor.mpNodeParent);
+ mAnchor.mpNodeLeft = RBTreeGetMinChild(mAnchor.mpNodeParent);
+ mnSize = x.mnSize;
+ }
+ }
+ return *this;
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::this_type&
+ rbtree<K, V, C, A, E, bM, bU>::operator=(this_type&& x)
+ {
+ if(this != &x)
+ {
+ clear(); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor.
+ swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy.
+ }
+ return *this;
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::this_type&
+ rbtree<K, V, C, A, E, bM, bU>::operator=(std::initializer_list<value_type> ilist)
+ {
+ // The simplest means of doing this is to clear and insert. There probably isn't a generic
+ // solution that's any more efficient without having prior knowledge of the ilist contents.
+ clear();
+
+ for(typename std::initializer_list<value_type>::iterator it = ilist.begin(), itEnd = ilist.end(); it != itEnd; ++it)
+ DoInsertValue(has_unique_keys_type(), eastl::move(*it));
+
+ return *this;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ void rbtree<K, V, C, A, E, bM, bU>::swap(this_type& x)
+ {
+ #if EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ if(mAllocator == x.mAllocator) // If allocators are equivalent...
+ #endif
+ {
+			// Most of our members can be exchanged by a basic swap:
+ // We leave mAllocator as-is.
+ eastl::swap(mnSize, x.mnSize);
+ eastl::swap(get_compare(), x.get_compare());
+ #if !EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ eastl::swap(mAllocator, x.mAllocator);
+ #endif
+
+
+ // However, because our anchor node is a part of our class instance and not
+ // dynamically allocated, we can't do a swap of it but must do a more elaborate
+ // procedure. This is the downside to having the mAnchor be like this, but
+ // otherwise we consider it a good idea to avoid allocating memory for a
+ // nominal container instance.
+
+ // We optimize for the expected most common case: both pointers being non-null.
+ if(mAnchor.mpNodeParent && x.mAnchor.mpNodeParent) // If both pointers are non-null...
+ {
+ eastl::swap(mAnchor.mpNodeRight, x.mAnchor.mpNodeRight);
+ eastl::swap(mAnchor.mpNodeLeft, x.mAnchor.mpNodeLeft);
+ eastl::swap(mAnchor.mpNodeParent, x.mAnchor.mpNodeParent);
+
+ // We need to fix up the anchors to point to themselves (we can't just swap them).
+ mAnchor.mpNodeParent->mpNodeParent = &mAnchor;
+ x.mAnchor.mpNodeParent->mpNodeParent = &x.mAnchor;
+ }
+ else if(mAnchor.mpNodeParent)
+ {
+ x.mAnchor.mpNodeRight = mAnchor.mpNodeRight;
+ x.mAnchor.mpNodeLeft = mAnchor.mpNodeLeft;
+ x.mAnchor.mpNodeParent = mAnchor.mpNodeParent;
+ x.mAnchor.mpNodeParent->mpNodeParent = &x.mAnchor;
+
+				// We need to fix up our anchor to point to itself (we can't have it swap with x).
+ mAnchor.mpNodeRight = &mAnchor;
+ mAnchor.mpNodeLeft = &mAnchor;
+ mAnchor.mpNodeParent = NULL;
+ }
+ else if(x.mAnchor.mpNodeParent)
+ {
+ mAnchor.mpNodeRight = x.mAnchor.mpNodeRight;
+ mAnchor.mpNodeLeft = x.mAnchor.mpNodeLeft;
+ mAnchor.mpNodeParent = x.mAnchor.mpNodeParent;
+ mAnchor.mpNodeParent->mpNodeParent = &mAnchor;
+
+				// We need to fix up x's anchor to point to itself (we can't have it swap with us).
+ x.mAnchor.mpNodeRight = &x.mAnchor;
+ x.mAnchor.mpNodeLeft = &x.mAnchor;
+ x.mAnchor.mpNodeParent = NULL;
+ } // Else both are NULL and there is nothing to do.
+ }
+ #if EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
+ else
+ {
+ const this_type temp(*this); // Can't call eastl::swap because that would
+ *this = x; // itself call this member swap function.
+ x = temp;
+ }
+ #endif
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::insert_return_type // map/set::insert returns a pair; multimap/multiset::insert returns an iterator.
+ rbtree<K, V, C, A, E, bM, bU>::emplace(Args&&... args)
+ {
+ return DoInsertValue(has_unique_keys_type(), eastl::forward<Args>(args)...);
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::emplace_hint(const_iterator position, Args&&... args)
+ {
+ return DoInsertValueHint(has_unique_keys_type(), position, eastl::forward<Args>(args)...);
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ inline eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::try_emplace(const key_type& key, Args&&... args)
+ {
+ return DoInsertValue(has_unique_keys_type(), piecewise_construct, eastl::forward_as_tuple(key), eastl::forward_as_tuple(eastl::forward<Args>(args)...));
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ inline eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::try_emplace(key_type&& key, Args&&... args)
+ {
+ return DoInsertValue(has_unique_keys_type(), piecewise_construct, eastl::forward_as_tuple(eastl::move(key)), eastl::forward_as_tuple(eastl::forward<Args>(args)...));
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::try_emplace(const_iterator position, const key_type& key, Args&&... args)
+ {
+ return DoInsertValueHint(
+ has_unique_keys_type(), position,
+ piecewise_construct, eastl::forward_as_tuple(key), eastl::forward_as_tuple(eastl::forward<Args>(args)...));
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::try_emplace(const_iterator position, key_type&& key, Args&&... args)
+ {
+ return DoInsertValueHint(
+ has_unique_keys_type(), position,
+ piecewise_construct, eastl::forward_as_tuple(eastl::move(key)), eastl::forward_as_tuple(eastl::forward<Args>(args)...));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class P, class>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::insert_return_type // map/set::insert returns a pair; multimap/multiset::insert returns an iterator.
+ rbtree<K, V, C, A, E, bM, bU>::insert(P&& otherValue)
+ {
+ // Need to use forward instead of move because P&& is a "universal reference" instead of an rvalue reference.
+ return emplace(eastl::forward<P>(otherValue));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::insert(const_iterator position, value_type&& value)
+ {
+ return DoInsertValueHint(has_unique_keys_type(), position, eastl::move(value));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::insert_return_type // map/set::insert returns a pair; multimap/multiset::insert returns an iterator.
+ rbtree<K, V, C, A, E, bM, bU>::insert(const value_type& value)
+ {
+ return DoInsertValue(has_unique_keys_type(), value);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::insert(const_iterator position, const value_type& value)
+ {
+ return DoInsertValueHint(has_unique_keys_type(), position, value);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class M>
+ eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::insert_or_assign(const key_type& k, M&& obj)
+ {
+ auto iter = find(k);
+
+ if(iter == end())
+ {
+ return insert(value_type(piecewise_construct, eastl::forward_as_tuple(k), eastl::forward_as_tuple(eastl::forward<M>(obj))));
+ }
+ else
+ {
+ iter->second = eastl::forward<M>(obj);
+ return {iter, false};
+ }
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class M>
+ eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::insert_or_assign(key_type&& k, M&& obj)
+ {
+ auto iter = find(k);
+
+ if(iter == end())
+ {
+ return insert(value_type(piecewise_construct, eastl::forward_as_tuple(eastl::move(k)), eastl::forward_as_tuple(eastl::forward<M>(obj))));
+ }
+ else
+ {
+ iter->second = eastl::forward<M>(obj);
+ return {iter, false};
+ }
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class M>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::insert_or_assign(const_iterator hint, const key_type& k, M&& obj)
+ {
+ auto iter = find(k);
+
+ if(iter == end())
+ {
+ return insert(hint, value_type(piecewise_construct, eastl::forward_as_tuple(k), eastl::forward_as_tuple(eastl::forward<M>(obj))));
+ }
+ else
+ {
+ iter->second = eastl::forward<M>(obj);
+ return iter;
+ }
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class M>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::insert_or_assign(const_iterator hint, key_type&& k, M&& obj)
+ {
+ auto iter = find(k);
+
+ if(iter == end())
+ {
+ return insert(hint, value_type(piecewise_construct, eastl::forward_as_tuple(eastl::move(k)), eastl::forward_as_tuple(eastl::forward<M>(obj))));
+ }
+ else
+ {
+ iter->second = eastl::forward<M>(obj);
+ return iter;
+ }
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoGetKeyInsertionPositionUniqueKeys(bool& canInsert, const key_type& key)
+ {
+		// This code is essentially a slightly modified copy of the rbtree::insert
+ // function whereby this version takes a key and not a full value_type.
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pLowerBound = (node_type*)&mAnchor; // Set it to the container end for now.
+ node_type* pParent; // This will be where we insert the new node.
+
+ bool bValueLessThanNode = true; // If the tree is empty, this will result in an insertion at the front.
+
+ // Find insertion position of the value. This will either be a position which
+ // already contains the value, a position which is greater than the value or
+ // end(), which we treat like a position which is greater than the value.
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ bValueLessThanNode = compare(key, extractKey(pCurrent->mValue));
+ pLowerBound = pCurrent;
+
+ if(bValueLessThanNode)
+ {
+ EASTL_VALIDATE_COMPARE(!compare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+
+ pParent = pLowerBound; // pLowerBound is actually upper bound right now (i.e. it is > value instead of <=), but we will make it the lower bound below.
+
+ if(bValueLessThanNode) // If we ended up on the left side of the last parent node...
+ {
+			if(EASTL_LIKELY(pLowerBound != (node_type*)mAnchor.mpNodeLeft)) // If we are not inserting at the very front of the tree (i.e. the tree isn't empty and the key isn't less than the current minimum)...
+ {
+				// At this point, pLowerBound points to a node which is greater than the value.
+ // Move it back by one, so that it points to a node which is <= value.
+ pLowerBound = (node_type*)RBTreeDecrement(pLowerBound);
+ }
+ else
+ {
+ canInsert = true;
+ return pLowerBound;
+ }
+ }
+
+ // Since here we require values to be unique, we will do nothing if the value already exists.
+ if(compare(extractKey(pLowerBound->mValue), key)) // If the node is < the value (i.e. if value is >= the node)...
+ {
+ EASTL_VALIDATE_COMPARE(!compare(key, extractKey(pLowerBound->mValue))); // Validate that the compare function is sane.
+ canInsert = true;
+ return pParent;
+ }
+
+		// The item already exists (as determined by the compare directly above), so we report that insertion isn't possible and return the existing node.
+ canInsert = false;
+ return pLowerBound;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoGetKeyInsertionPositionNonuniqueKeys(const key_type& key)
+ {
+ // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+ extract_key extractKey;
+
+ while(pCurrent)
+ {
+ pRangeEnd = pCurrent;
+
+ if(compare(key, extractKey(pCurrent->mValue)))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+
+ return pRangeEnd;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(true_type, value_type&& value)
+ {
+ extract_key extractKey;
+ key_type key(extractKey(value));
+ bool canInsert;
+ node_type* pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key);
+
+ if(canInsert)
+ {
+ const iterator itResult(DoInsertValueImpl(pPosition, false, key, eastl::move(value)));
+ return pair<iterator, bool>(itResult, true);
+ }
+
+ return pair<iterator, bool>(iterator(pPosition), false);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(false_type, value_type&& value)
+ {
+ extract_key extractKey;
+ key_type key(extractKey(value));
+ node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key);
+
+ return DoInsertValueImpl(pPosition, false, key, eastl::move(value));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(true_type, Args&&... args) // true_type means keys are unique.
+ {
+ // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+ // Note that we return a pair and not an iterator. This is because the C++ standard for map
+ // and set is to return a pair and not just an iterator.
+
+ node_type* pNodeNew = DoCreateNode(eastl::forward<Args>(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+ const key_type& key = extract_key{}(pNodeNew->mValue);
+
+ bool canInsert;
+ node_type* pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key);
+
+ if(canInsert)
+ {
+ iterator itResult(DoInsertValueImpl(pPosition, false, key, pNodeNew));
+ return pair<iterator, bool>(itResult, true);
+ }
+
+ DoFreeNode(pNodeNew);
+ return pair<iterator, bool>(iterator(pPosition), false);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(false_type, Args&&... args) // false_type means keys are not unique.
+ {
+		// Note that we create the node directly from the forwarded arguments, read the key back out of
+		// the resulting node, and hand that node to DoInsertValueImpl. This way no temporary value_type
+		// is ever constructed on the stack, which matters when sizeof(value_type) is large.
+
+ node_type* const pNodeNew = DoCreateNode(eastl::forward<Args>(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+ const key_type& key = extract_key{}(pNodeNew->mValue);
+
+ node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key);
+
+ return DoInsertValueImpl(pPosition, false, key, pNodeNew);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValueImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key, Args&&... args)
+ {
+ node_type* const pNodeNew = DoCreateNode(eastl::forward<Args>(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+
+ return DoInsertValueImpl(pNodeParent, bForceToLeft, key, pNodeNew);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValueImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key, node_type* pNodeNew)
+ {
+ EASTL_ASSERT_MSG(pNodeNew != nullptr, "node to insert to the rbtree must not be null");
+
+ RBTreeSide side;
+ extract_key extractKey;
+
+ // The reason we may want to have bForceToLeft == true is that pNodeParent->mValue and value may be equal.
+ // In that case it doesn't matter what side we insert on, except that the C++ LWG #233 improvement report
+ // suggests that we should use the insert hint position to force an ordering. So that's what we do.
+ if(bForceToLeft || (pNodeParent == &mAnchor) || compare(key, extractKey(pNodeParent->mValue)))
+ side = kRBTreeSideLeft;
+ else
+ side = kRBTreeSideRight;
+
+ RBTreeInsert(pNodeNew, pNodeParent, &mAnchor, side);
+ mnSize++;
+
+ return iterator(pNodeNew);
+ }
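+	// A hedged illustration of the bForceToLeft behavior above for containers with non-unique keys
+	// (assuming an eastl::multiset<int>; in this implementation the hint names the element *before*
+	// the insertion point, per the "To consider" notes elsewhere in this file):
+	//     eastl::multiset<int> ms;
+	//     ms.insert(2); ms.insert(2);
+	//     eastl::multiset<int>::iterator hint = ms.begin(); // The first of the equal keys.
+	//     ms.insert(hint, 2); // The new element lands immediately after the hint rather than at an
+	//                         // arbitrary end of the equal range (see the LWG #233 comment above).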
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(true_type, const key_type& key) // true_type means keys are unique.
+ {
+ // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+ // Note that we return a pair and not an iterator. This is because the C++ standard for map
+ // and set is to return a pair and not just an iterator.
+ bool canInsert;
+ node_type* pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key);
+
+ if(canInsert)
+ {
+ const iterator itResult(DoInsertKeyImpl(pPosition, false, key));
+ return pair<iterator, bool>(itResult, true);
+ }
+
+ return pair<iterator, bool>(iterator(pPosition), false);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(false_type, const key_type& key) // false_type means keys are not unique.
+ {
+ node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key);
+
+ return DoInsertKeyImpl(pPosition, false, key);
+ }
+
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoGetKeyInsertionPositionUniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key)
+ {
+ extract_key extractKey;
+
+ if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position...
+ {
+ iterator itNext(position.mpNode);
+ ++itNext;
+
+ // To consider: Change this so that 'position' specifies the position after
+ // where the insertion goes and not the position before where the insertion goes.
+ // Doing so would make this more in line with user expectations and with LWG #233.
+ const bool bPositionLessThanValue = compare(extractKey(position.mpNode->mValue), key);
+
+ if(bPositionLessThanValue) // If (value > *position)...
+ {
+ EASTL_VALIDATE_COMPARE(!compare(key, extractKey(position.mpNode->mValue))); // Validate that the compare function is sane.
+
+ const bool bValueLessThanNext = compare(key, extractKey(itNext.mpNode->mValue));
+
+ if(bValueLessThanNext) // If value < *itNext...
+ {
+ EASTL_VALIDATE_COMPARE(!compare(extractKey(itNext.mpNode->mValue), key)); // Validate that the compare function is sane.
+
+ if(position.mpNode->mpNodeRight)
+ {
+ bForceToLeft = true; // Specifically insert in front of (to the left of) itNext (and thus after 'position').
+ return itNext.mpNode;
+ }
+
+ bForceToLeft = false;
+ return position.mpNode;
+ }
+ }
+
+ bForceToLeft = false;
+			return NULL; // The specified hint was not useful, so the caller falls back to a regular insertion.
+ }
+
+ if(mnSize && compare(extractKey(((node_type*)mAnchor.mpNodeRight)->mValue), key))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(key, extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))); // Validate that the compare function is sane.
+ bForceToLeft = false;
+ return (node_type*)mAnchor.mpNodeRight;
+ }
+
+ bForceToLeft = false;
+ return NULL; // The caller can do a default insert.
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoGetKeyInsertionPositionNonuniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key)
+ {
+ extract_key extractKey;
+
+ if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position...
+ {
+ iterator itNext(position.mpNode);
+ ++itNext;
+
+ // To consider: Change this so that 'position' specifies the position after
+ // where the insertion goes and not the position before where the insertion goes.
+ // Doing so would make this more in line with user expectations and with LWG #233.
+ if(!compare(key, extractKey(position.mpNode->mValue)) && // If value >= *position &&
+ !compare(extractKey(itNext.mpNode->mValue), key)) // if value <= *itNext...
+ {
+ if(position.mpNode->mpNodeRight) // If there are any nodes to the right... [this expression will always be true as long as we aren't at the end()]
+ {
+ bForceToLeft = true; // Specifically insert in front of (to the left of) itNext (and thus after 'position').
+ return itNext.mpNode;
+ }
+
+ bForceToLeft = false;
+ return position.mpNode;
+ }
+
+ bForceToLeft = false;
+			return NULL; // The specified hint was not useful, so the caller falls back to a regular insertion.
+ }
+
+ // This pathway shouldn't be commonly executed, as the user shouldn't be calling
+ // this hinted version of insert if the user isn't providing a useful hint.
+ if(mnSize && !compare(key, extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))) // If we are non-empty and the value is >= the last node...
+ {
+			bForceToLeft = false;
+ return (node_type*)mAnchor.mpNodeRight;
+ }
+
+ bForceToLeft = false;
+ return NULL;
+ }
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValueHint(true_type, const_iterator position, Args&&... args) // true_type means keys are unique.
+ {
+ // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+
+ node_type* pNodeNew = DoCreateNode(eastl::forward<Args>(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+ const key_type& key(extract_key{}(pNodeNew->mValue));
+
+ bool bForceToLeft;
+ node_type* pPosition = DoGetKeyInsertionPositionUniqueKeysHint(position, bForceToLeft, key);
+
+ if (!pPosition)
+ {
+ bool canInsert;
+ pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key);
+
+ if (!canInsert)
+ {
+ DoFreeNode(pNodeNew);
+ return iterator(pPosition);
+ }
+
+ bForceToLeft = false;
+ }
+
+ return DoInsertValueImpl(pPosition, bForceToLeft, key, pNodeNew);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <class... Args>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValueHint(false_type, const_iterator position, Args&&... args) // false_type means keys are not unique.
+ {
+ // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+
+ node_type* pNodeNew = DoCreateNode(eastl::forward<Args>(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+ const key_type& key(extract_key{}(pNodeNew->mValue));
+
+ bool bForceToLeft;
+ node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeysHint(position, bForceToLeft, key);
+
+ if (!pPosition)
+ {
+ pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key);
+ bForceToLeft = false;
+ }
+
+ return DoInsertValueImpl(pPosition, bForceToLeft, key, pNodeNew);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValueHint(true_type, const_iterator position, value_type&& value) // true_type means keys are unique.
+ {
+ // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+
+ extract_key extractKey;
+ key_type key(extractKey(value));
+ bool bForceToLeft;
+ node_type* pPosition = DoGetKeyInsertionPositionUniqueKeysHint(position, bForceToLeft, key);
+
+ if(pPosition)
+ return DoInsertValueImpl(pPosition, bForceToLeft, key, eastl::move(value));
+ else
+ return DoInsertValue(has_unique_keys_type(), eastl::move(value)).first;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertValueHint(false_type, const_iterator position, value_type&& value) // false_type means keys are not unique.
+ {
+ // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+ extract_key extractKey;
+ key_type key(extractKey(value));
+ bool bForceToLeft;
+ node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeysHint(position, bForceToLeft, key);
+
+ if(pPosition)
+ return DoInsertValueImpl(pPosition, bForceToLeft, key, eastl::move(value));
+ else
+ return DoInsertValue(has_unique_keys_type(), eastl::move(value));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(true_type, const_iterator position, const key_type& key) // true_type means keys are unique.
+ {
+ bool bForceToLeft;
+ node_type* pPosition = DoGetKeyInsertionPositionUniqueKeysHint(position, bForceToLeft, key);
+
+ if(pPosition)
+ return DoInsertKeyImpl(pPosition, bForceToLeft, key);
+ else
+ return DoInsertKey(has_unique_keys_type(), key).first;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(false_type, const_iterator position, const key_type& key) // false_type means keys are not unique.
+ {
+ // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+ //
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+ bool bForceToLeft;
+ node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeysHint(position, bForceToLeft, key);
+
+ if(pPosition)
+ return DoInsertKeyImpl(pPosition, bForceToLeft, key);
+ else
+ return DoInsertKey(has_unique_keys_type(), key); // We are empty or we are inserting at the end.
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::DoInsertKeyImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key)
+ {
+ RBTreeSide side;
+ extract_key extractKey;
+
+ // The reason we may want to have bForceToLeft == true is that pNodeParent->mValue and value may be equal.
+ // In that case it doesn't matter what side we insert on, except that the C++ LWG #233 improvement report
+ // suggests that we should use the insert hint position to force an ordering. So that's what we do.
+ if(bForceToLeft || (pNodeParent == &mAnchor) || compare(key, extractKey(pNodeParent->mValue)))
+ side = kRBTreeSideLeft;
+ else
+ side = kRBTreeSideRight;
+
+ node_type* const pNodeNew = DoCreateNodeFromKey(key); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+ RBTreeInsert(pNodeNew, pNodeParent, &mAnchor, side);
+ mnSize++;
+
+ return iterator(pNodeNew);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ void rbtree<K, V, C, A, E, bM, bU>::insert(std::initializer_list<value_type> ilist)
+ {
+ for(typename std::initializer_list<value_type>::iterator it = ilist.begin(), itEnd = ilist.end(); it != itEnd; ++it)
+ DoInsertValue(has_unique_keys_type(), eastl::move(*it));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <typename InputIterator>
+ void rbtree<K, V, C, A, E, bM, bU>::insert(InputIterator first, InputIterator last)
+ {
+ for( ; first != last; ++first)
+ DoInsertValue(has_unique_keys_type(), *first); // Or maybe we should call 'insert(end(), *first)' instead. If the first-last range was sorted then this might make some sense.
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::clear()
+ {
+ // Erase the entire tree. DoNukeSubtree is not a
+ // conventional erase function, as it does no rebalancing.
+ DoNukeSubtree((node_type*)mAnchor.mpNodeParent);
+ reset_lose_memory();
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::reset_lose_memory()
+ {
+ // The reset_lose_memory function is a special extension function which unilaterally
+ // resets the container to an empty state without freeing the memory of
+ // the contained objects. This is useful for very quickly tearing down a
+ // container built into scratch memory.
+ mAnchor.mpNodeRight = &mAnchor;
+ mAnchor.mpNodeLeft = &mAnchor;
+ mAnchor.mpNodeParent = NULL;
+ mAnchor.mColor = kRBTreeColorRed;
+ mnSize = 0;
+ }
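+	// A brief, hedged sketch of the intended use (the ScratchAllocator/arena names below are
+	// hypothetical; reset_lose_memory is reachable on eastl::set because set derives from rbtree).
+	// It is only appropriate when node memory doesn't need to be freed individually, e.g. when the
+	// allocator hands out blocks from a scratch arena that is discarded wholesale:
+	//     ScratchAllocator scratch(arena);
+	//     eastl::set<int, eastl::less<int>, ScratchAllocator> tempSet(scratch);
+	//     // ... build and query tempSet ...
+	//     tempSet.reset_lose_memory(); // O(1): rewires the anchor only; no destructors, no frees.
+	//     arena.Reset();               // The arena reclaims every node allocation at once.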
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::erase(const_iterator position)
+ {
+ const iterator iErase(position.mpNode);
+		--mnSize; // Interleave this between the two uses of 'position'. We expect no exceptions to occur during the code below.
+ ++position;
+ RBTreeErase(iErase.mpNode, &mAnchor);
+ DoFreeNode(iErase.mpNode);
+ return iterator(position.mpNode);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::erase(const_iterator first, const_iterator last)
+ {
+ // We expect that if the user means to clear the container, they will call clear.
+ if(EASTL_LIKELY((first.mpNode != mAnchor.mpNodeLeft) || (last.mpNode != &mAnchor))) // If (first != begin or last != end) ...
+ {
+ // Basic implementation:
+ while(first != last)
+ first = erase(first);
+ return iterator(first.mpNode);
+
+ // Inlined implementation:
+ //size_type n = 0;
+ //while(first != last)
+ //{
+ // const iterator itErase(first);
+ // ++n;
+ // ++first;
+ // RBTreeErase(itErase.mpNode, &mAnchor);
+ // DoFreeNode(itErase.mpNode);
+ //}
+ //mnSize -= n;
+ //return first;
+ }
+
+ clear();
+ return iterator((node_type*)&mAnchor); // Same as: return end();
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::erase(const_reverse_iterator position)
+ {
+ return reverse_iterator(erase((++position).base()));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+ rbtree<K, V, C, A, E, bM, bU>::erase(const_reverse_iterator first, const_reverse_iterator last)
+ {
+ // Version which erases in order from first to last.
+ // difference_type i(first.base() - last.base());
+ // while(i--)
+ // first = erase(first);
+ // return first;
+
+ // Version which erases in order from last to first, but is slightly more efficient:
+ return reverse_iterator(erase((++last).base(), (++first).base()));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::erase(const key_type* first, const key_type* last)
+ {
+ // We have no choice but to run a loop like this, as the first/last range could
+ // have values that are discontiguously located in the tree. And some may not
+ // even be in the tree.
+ while(first != last)
+ erase(*first++);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::find(const key_type& key)
+ {
+ // To consider: Implement this instead via calling lower_bound and
+ // inspecting the result. The following is an implementation of this:
+ // const iterator it(lower_bound(key));
+ // return ((it.mpNode == &mAnchor) || compare(key, extractKey(it.mpNode->mValue))) ? iterator(&mAnchor) : it;
+ // We don't currently implement the above because in practice people tend to call
+ // find a lot with trees, but very uncommonly call lower_bound.
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ if(EASTL_LIKELY(!compare(extractKey(pCurrent->mValue), key))) // If pCurrent is >= key...
+ {
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ {
+ EASTL_VALIDATE_COMPARE(!compare(key, extractKey(pCurrent->mValue))); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+ }
+
+ if(EASTL_LIKELY((pRangeEnd != &mAnchor) && !compare(key, extractKey(pRangeEnd->mValue))))
+ return iterator(pRangeEnd);
+ return iterator((node_type*)&mAnchor);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::find(const key_type& key) const
+ {
+ typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+ return const_iterator(const_cast<rbtree_type*>(this)->find(key));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <typename U, typename Compare2>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::find_as(const U& u, Compare2 compare2)
+ {
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ if(EASTL_LIKELY(!compare2(extractKey(pCurrent->mValue), u))) // If pCurrent is >= u...
+ {
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ {
+ EASTL_VALIDATE_COMPARE(!compare2(u, extractKey(pCurrent->mValue))); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+ }
+
+ if(EASTL_LIKELY((pRangeEnd != &mAnchor) && !compare2(u, extractKey(pRangeEnd->mValue))))
+ return iterator(pRangeEnd);
+ return iterator((node_type*)&mAnchor);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template <typename U, typename Compare2>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::find_as(const U& u, Compare2 compare2) const
+ {
+ typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+ return const_iterator(const_cast<rbtree_type*>(this)->find_as(u, compare2));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::lower_bound(const key_type& key)
+ {
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ if(EASTL_LIKELY(!compare(extractKey(pCurrent->mValue), key))) // If pCurrent is >= key...
+ {
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ {
+ EASTL_VALIDATE_COMPARE(!compare(key, extractKey(pCurrent->mValue))); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+ }
+
+ return iterator(pRangeEnd);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::lower_bound(const key_type& key) const
+ {
+ typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+ return const_iterator(const_cast<rbtree_type*>(this)->lower_bound(key));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::iterator
+ rbtree<K, V, C, A, E, bM, bU>::upper_bound(const key_type& key)
+ {
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ if(EASTL_LIKELY(compare(key, extractKey(pCurrent->mValue)))) // If key is < pCurrent...
+ {
+ EASTL_VALIDATE_COMPARE(!compare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane.
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+
+ return iterator(pRangeEnd);
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+ rbtree<K, V, C, A, E, bM, bU>::upper_bound(const key_type& key) const
+ {
+ typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+ return const_iterator(const_cast<rbtree_type*>(this)->upper_bound(key));
+ }
+
+
+ // To do: Move this validate function entirely to a template-less implementation.
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ bool rbtree<K, V, C, A, E, bM, bU>::validate() const
+ {
+ // Red-black trees have the following canonical properties which we validate here:
+ // 1 Every node is either red or black.
+		// 2 Every leaf (NULL) is black by definition. Any number of black nodes may appear in a sequence.
+ // 3 If a node is red, then both its children are black. Thus, on any path from
+ // the root to a leaf, red nodes must not be adjacent.
+ // 4 Every simple path from a node to a descendant leaf contains the same number of black nodes.
+ // 5 The mnSize member of the tree must equal the number of nodes in the tree.
+ // 6 The tree is sorted as per a conventional binary tree.
+ // 7 The comparison function is sane; it obeys strict weak ordering. If compare(a,b) is true, then compare(b,a) must be false. Both cannot be true.
+
+ extract_key extractKey;
+
+ if(mnSize)
+ {
+ // Verify basic integrity.
+ //if(!mAnchor.mpNodeParent || (mAnchor.mpNodeLeft == mAnchor.mpNodeRight))
+ // return false; // Fix this for case of empty tree.
+
+ if(mAnchor.mpNodeLeft != RBTreeGetMinChild(mAnchor.mpNodeParent))
+ return false;
+
+ if(mAnchor.mpNodeRight != RBTreeGetMaxChild(mAnchor.mpNodeParent))
+ return false;
+
+ const size_t nBlackCount = RBTreeGetBlackCount(mAnchor.mpNodeParent, mAnchor.mpNodeLeft);
+ size_type nIteratedSize = 0;
+
+ for(const_iterator it = begin(); it != end(); ++it, ++nIteratedSize)
+ {
+ const node_type* const pNode = (const node_type*)it.mpNode;
+ const node_type* const pNodeRight = (const node_type*)pNode->mpNodeRight;
+ const node_type* const pNodeLeft = (const node_type*)pNode->mpNodeLeft;
+
+ // Verify #7 above.
+ if(pNodeRight && compare(extractKey(pNodeRight->mValue), extractKey(pNode->mValue)) && compare(extractKey(pNode->mValue), extractKey(pNodeRight->mValue))) // Validate that the compare function is sane.
+ return false;
+
+ // Verify #7 above.
+ if(pNodeLeft && compare(extractKey(pNodeLeft->mValue), extractKey(pNode->mValue)) && compare(extractKey(pNode->mValue), extractKey(pNodeLeft->mValue))) // Validate that the compare function is sane.
+ return false;
+
+ // Verify item #1 above.
+ if((pNode->mColor != kRBTreeColorRed) && (pNode->mColor != kRBTreeColorBlack))
+ return false;
+
+ // Verify item #3 above.
+ if(pNode->mColor == kRBTreeColorRed)
+ {
+ if((pNodeRight && (pNodeRight->mColor == kRBTreeColorRed)) ||
+ (pNodeLeft && (pNodeLeft->mColor == kRBTreeColorRed)))
+ return false;
+ }
+
+ // Verify item #6 above.
+ if(pNodeRight && compare(extractKey(pNodeRight->mValue), extractKey(pNode->mValue)))
+ return false;
+
+ if(pNodeLeft && compare(extractKey(pNode->mValue), extractKey(pNodeLeft->mValue)))
+ return false;
+
+ if(!pNodeRight && !pNodeLeft) // If we are at a bottom node of the tree...
+ {
+ // Verify item #4 above.
+ if(RBTreeGetBlackCount(mAnchor.mpNodeParent, pNode) != nBlackCount)
+ return false;
+ }
+ }
+
+ // Verify item #5 above.
+ if(nIteratedSize != mnSize)
+ return false;
+
+ return true;
+ }
+ else
+ {
+ if((mAnchor.mpNodeLeft != &mAnchor) || (mAnchor.mpNodeRight != &mAnchor))
+ return false;
+ }
+
+ return true;
+ }
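+	// A small, hedged sketch of exercising validate() from a container built on this tree (names are
+	// illustrative; the check walks every node, so it is typically reserved for debug builds):
+	//     eastl::map<int, int> m;
+	//     m.insert(eastl::make_pair(1, 10));
+	//     #if EASTL_DEBUG
+	//         EASTL_ASSERT(m.validate()); // Checks the seven invariants listed above.
+	//     #endif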
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline int rbtree<K, V, C, A, E, bM, bU>::validate_iterator(const_iterator i) const
+ {
+ // To do: Come up with a more efficient mechanism of doing this.
+
+ for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+ {
+ if(temp == i)
+ return (isf_valid | isf_current | isf_can_dereference);
+ }
+
+ if(i == end())
+ return (isf_valid | isf_current);
+
+ return isf_none;
+ }
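+
+ // Example usage (illustrative; the isf_* flags are EASTL's iterator validation flags):
+ //     eastl::map<int, int>::iterator it = intMap.find(0);
+ //     if(intMap.validate_iterator(it) & eastl::isf_can_dereference)
+ //         { /* 'it' refers to an element within intMap and may be dereferenced. */ }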
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoAllocateNode()
+ {
+ auto* pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(node_type), 0);
+ EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
+
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ inline void rbtree<K, V, C, A, E, bM, bU>::DoFreeNode(node_type* pNode)
+ {
+ pNode->~node_type();
+ EASTLFree(mAllocator, pNode, sizeof(node_type));
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoCreateNodeFromKey(const key_type& key)
+ {
+ // Note that this function intentionally leaves the node pointers uninitialized.
+ // The caller would otherwise just turn right around and modify them, so there's
+ // no point in us initializing them to anything (except in a debug build).
+ node_type* const pNode = DoAllocateNode();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new (eastl::addressof(pNode->mValue)) value_type(pair_first_construct, key);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeNode(pNode);
+ throw;
+ }
+ #endif
+
+ #if EASTL_DEBUG
+ pNode->mpNodeRight = NULL;
+ pNode->mpNodeLeft = NULL;
+ pNode->mpNodeParent = NULL;
+ pNode->mColor = kRBTreeColorBlack;
+ #endif
+
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(const value_type& value)
+ {
+ // Note that this function intentionally leaves the node pointers uninitialized.
+ // The caller would otherwise just turn right around and modify them, so there's
+ // no point in us initializing them to anything (except in a debug build).
+ node_type* const pNode = DoAllocateNode();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(value);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeNode(pNode);
+ throw;
+ }
+ #endif
+
+ #if EASTL_DEBUG
+ pNode->mpNodeRight = NULL;
+ pNode->mpNodeLeft = NULL;
+ pNode->mpNodeParent = NULL;
+ pNode->mColor = kRBTreeColorBlack;
+ #endif
+
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(value_type&& value)
+ {
+ // Note that this function intentionally leaves the node pointers uninitialized.
+ // The caller would otherwise just turn right around and modify them, so there's
+ // no point in us initializing them to anything (except in a debug build).
+ node_type* const pNode = DoAllocateNode();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(eastl::move(value));
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeNode(pNode);
+ throw;
+ }
+ #endif
+
+ #if EASTL_DEBUG
+ pNode->mpNodeRight = NULL;
+ pNode->mpNodeLeft = NULL;
+ pNode->mpNodeParent = NULL;
+ pNode->mColor = kRBTreeColorBlack;
+ #endif
+
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ template<class... Args>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(Args&&... args)
+ {
+ // Note that this function intentionally leaves the node pointers uninitialized.
+ // The caller would otherwise just turn right around and modify them, so there's
+ // no point in us initializing them to anything (except in a debug build).
+ node_type* const pNode = DoAllocateNode();
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ ::new(eastl::addressof(pNode->mValue)) value_type(eastl::forward<Args>(args)...);
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoFreeNode(pNode);
+ throw;
+ }
+ #endif
+
+ #if EASTL_DEBUG
+ pNode->mpNodeRight = NULL;
+ pNode->mpNodeLeft = NULL;
+ pNode->mpNodeParent = NULL;
+ pNode->mColor = kRBTreeColorBlack;
+ #endif
+
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(const node_type* pNodeSource, node_type* pNodeParent)
+ {
+ node_type* const pNode = DoCreateNode(pNodeSource->mValue);
+
+ pNode->mpNodeRight = NULL;
+ pNode->mpNodeLeft = NULL;
+ pNode->mpNodeParent = pNodeParent;
+ pNode->mColor = pNodeSource->mColor;
+
+ return pNode;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+ rbtree<K, V, C, A, E, bM, bU>::DoCopySubtree(const node_type* pNodeSource, node_type* pNodeDest)
+ {
+ node_type* const pNewNodeRoot = DoCreateNode(pNodeSource, pNodeDest);
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ try
+ {
+ #endif
+ // Copy the right side of the tree recursively.
+ if(pNodeSource->mpNodeRight)
+ pNewNodeRoot->mpNodeRight = DoCopySubtree((const node_type*)pNodeSource->mpNodeRight, pNewNodeRoot);
+
+ node_type* pNewNodeLeft;
+
+ for(pNodeSource = (node_type*)pNodeSource->mpNodeLeft, pNodeDest = pNewNodeRoot;
+ pNodeSource;
+ pNodeSource = (node_type*)pNodeSource->mpNodeLeft, pNodeDest = pNewNodeLeft)
+ {
+ pNewNodeLeft = DoCreateNode(pNodeSource, pNodeDest);
+
+ pNodeDest->mpNodeLeft = pNewNodeLeft;
+
+ // Copy the right side of the tree recursively.
+ if(pNodeSource->mpNodeRight)
+ pNewNodeLeft->mpNodeRight = DoCopySubtree((const node_type*)pNodeSource->mpNodeRight, pNewNodeLeft);
+ }
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoNukeSubtree(pNewNodeRoot);
+ throw;
+ }
+ #endif
+
+ return pNewNodeRoot;
+ }
+
+
+ template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+ void rbtree<K, V, C, A, E, bM, bU>::DoNukeSubtree(node_type* pNode)
+ {
+ while(pNode) // Recursively traverse the tree and destroy items as we go.
+ {
+ DoNukeSubtree((node_type*)pNode->mpNodeRight);
+
+ node_type* const pNodeLeft = (node_type*)pNode->mpNodeLeft;
+ DoFreeNode(pNode);
+ pNode = pNodeLeft;
+ }
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator==(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return (a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin());
+ }
+
+
+ // Note that in operator< we do comparisons based on the tree value_type with operator<() of the
+ // value_type instead of the tree's Compare function. For set/multiset, the value_type is T, while
+ // for map/multimap the value_type is a pair<Key, T>. operator< for pair can be seen by looking at
+ // utility.h; it basically applies operator< to pair.first and then to pair.second. The C++ standard
+ // appears to require this behaviour, whether intentionally or not. If anything, a good reason to do
+ // this is for consistency. A map and a vector that contain the same items should compare the same.
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator<(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+ }
+
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator!=(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return !(a == b);
+ }
+
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator>(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator<=(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline bool operator>=(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+ inline void swap(rbtree<K, V, C, A, E, bM, bU>& a, rbtree<K, V, C, A, E, bM, bU>& b)
+ {
+ a.swap(b);
+ }
+
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/include/EASTL/internal/smart_ptr.h b/include/EASTL/internal/smart_ptr.h
new file mode 100644
index 0000000..f1d52e1
--- /dev/null
+++ b/include/EASTL/internal/smart_ptr.h
@@ -0,0 +1,264 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_SMART_PTR_H
+#define EASTL_INTERNAL_SMART_PTR_H
+
+
+#include <EABase/eabase.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/memory.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+namespace eastl
+{
+
+ namespace Internal
+ {
+ // Tells if the Deleter type has a typedef for pointer to T. If so then return it,
+ // else return T*. The large majority of the time the pointer type will be T*.
+ // The C++11 Standard requires that unique_ptr let the deleter define the pointer type.
+ //
+ // Example usage:
+ // typedef typename unique_pointer_type<int, SomeDeleter>::type pointer
+ //
+ template <typename T, typename Deleter>
+ class unique_pointer_type
+ {
+ template <typename U>
+ static typename U::pointer test(typename U::pointer*);
+
+ template <typename U>
+ static T* test(...);
+
+ public:
+ typedef decltype(test<typename eastl::remove_reference<Deleter>::type>(0)) type;
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_array_cv_convertible
+ //
+ // Tells if the array pointer P1 is cv-convertible to array pointer P2.
+ // The two types have to be equivalent pointer types and must be convertible
+ // when you consider const/volatile properties of them.
+ //
+ // Example usage:
+ // is_array_cv_convertible<int, Base*>::value => false
+ // is_array_cv_convertible<Base, Base*>::value => false
+ // is_array_cv_convertible<double*, bool*>::value => false
+ // is_array_cv_convertible<Subclass*, Base*>::value => false
+ // is_array_cv_convertible<const Base*, Base*>::value => false
+ // is_array_cv_convertible<Base*, Base*>::value => true
+ // is_array_cv_convertible<Base*, const Base*>::value => true
+ // is_array_cv_convertible<Base*, volatile Base*>::value => true
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_array_cv_convertible_CONFORMANCE 1
+
+ template <typename P1, typename P2, bool = eastl::is_same_v<eastl::remove_cv_t<typename pointer_traits<P1>::element_type>,
+ eastl::remove_cv_t<typename pointer_traits<P2>::element_type>>>
+ struct is_array_cv_convertible_impl
+ : public eastl::is_convertible<P1, P2> {}; // Return true if P1 is convertible to P2.
+
+ template <typename P1, typename P2>
+ struct is_array_cv_convertible_impl<P1, P2, false>
+ : public eastl::false_type {}; // P1's underlying type is not the same as P2's, so it can't be converted, even if P2 refers to a subclass of P1. Parent == Child, but Parent[] != Child[]
+
+ template <typename P1, typename P2, bool = eastl::is_scalar_v<P1> && !eastl::is_pointer_v<P1>>
+ struct is_array_cv_convertible
+ : public is_array_cv_convertible_impl<P1, P2> {};
+
+ template <typename P1, typename P2>
+ struct is_array_cv_convertible<P1, P2, true>
+ : public eastl::false_type {}; // P1 is scalar not a pointer, so it can't be converted to a pointer.
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_derived
+ //
+ // Given two (possibly identical) types Base and Derived, is_base_of<Base, Derived>::value == true
+ // if and only if Base is a direct or indirect base class of Derived. This is like is_base_of<Base, Derived>
+ // but returns false if Derived is the same as Base. So is_derived is true only if Derived is actually a subclass
+ // of Base and not Base itself.
+ //
+ // is_derived may only be applied to complete types.
+ //
+ // Example usage:
+ // is_derived<int, int>::value => false
+ // is_derived<int, bool>::value => false
+ // is_derived<Parent, Child>::value => true
+ // is_derived<Child, Parent>::value => false
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_TYPE_TRAIT_is_base_of_CONFORMANCE
+ #define EASTL_TYPE_TRAIT_is_derived_CONFORMANCE 1
+
+ template <typename Base, typename Derived>
+ struct is_derived : public eastl::integral_constant<bool, eastl::is_base_of<Base, Derived>::value && !eastl::is_same<typename eastl::remove_cv<Base>::type, typename eastl::remove_cv<Derived>::type>::value> {};
+ #else
+ #define EASTL_TYPE_TRAIT_is_derived_CONFORMANCE 0
+
+ template <typename Base, typename Derived> // This returns true if Derived is unrelated to Base. That's a wrong answer, but is better for us than returning false for compilers that don't support is_base_of.
+ struct is_derived : public eastl::integral_constant<bool, !eastl::is_same<typename eastl::remove_cv<Base>::type, typename eastl::remove_cv<Derived>::type>::value> {};
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_safe_array_conversion
+ //
+ // Say you have two array pointer types: T* t and U* u. You want to assign u to t, but only if
+ // that's a safe thing to do. As shown in the logic below, the array conversion
+ // is safe if U* and T* are convertible, if U is an array, and if either U or T is not
+ // a pointer or U is not derived from T.
+ //
+ // Note: Usage of this class could be replaced with is_array_cv_convertible usage.
+ // To do: Do this replacement and test it.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename T_pointer, typename U, typename U_pointer>
+ struct is_safe_array_conversion : public eastl::integral_constant<bool, eastl::is_convertible<U_pointer, T_pointer>::value &&
+ eastl::is_array<U>::value &&
+ (!eastl::is_pointer<U_pointer>::value || !is_pointer<T_pointer>::value || !Internal::is_derived<T, typename eastl::remove_extent<U>::type>::value)> {};
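+
+ // Example usage (illustrative; Base/Derived are hypothetical classes where Derived publicly derives from Base):
+ //     is_safe_array_conversion<int,  int*,  int[],     int*>::value     => true
+ //     is_safe_array_conversion<Base, Base*, Derived[], Derived*>::value => false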
+
+ } // namespace Internal
+
+
+
+
+
+
+
+ /// default_delete
+ ///
+ /// C++11 smart pointer default delete function class.
+ ///
+ /// Provides a default way to delete an object. This default is simply to call delete on the
+ /// object pointer. You can provide an alternative to this class or you can override this on
+ /// a class-by-class basis like the following:
+ /// template <>
+ /// struct smart_ptr_deleter<MyClass>
+ /// {
+ /// void operator()(MyClass* p) const
+ /// { SomeCustomFunction(p); }
+ /// };
+ ///
+ template <typename T>
+ struct default_delete
+ {
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION <= 4006) // GCC prior to 4.7 has a bug with noexcept here.
+ EA_CONSTEXPR default_delete() = default;
+ #else
+ EA_CONSTEXPR default_delete() EA_NOEXCEPT = default;
+ #endif
+
+ template <typename U> // Enable if T* can be constructed with U* (i.e. U* is convertible to T*).
+ default_delete(const default_delete<U>&, typename eastl::enable_if<is_convertible<U*, T*>::value>::type* = 0) EA_NOEXCEPT {}
+
+ void operator()(T* p) const EA_NOEXCEPT
+ { delete p; }
+ };
+
+
+ template <typename T>
+ struct default_delete<T[]> // Specialization for arrays.
+ {
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION <= 4006) // GCC prior to 4.7 has a bug with noexcept here.
+ EA_CONSTEXPR default_delete() = default;
+ #else
+ EA_CONSTEXPR default_delete() EA_NOEXCEPT = default;
+ #endif
+
+ template <typename U> // This ctor is enabled if T is equal to or a base of U, and if U is less or equal const/volatile-qualified than T.
+ default_delete(const default_delete<U[]>&, typename eastl::enable_if<Internal::is_array_cv_convertible<U*, T*>::value>::type* = 0) EA_NOEXCEPT {}
+
+ void operator()(T* p) const EA_NOEXCEPT
+ { delete[] p; }
+ };
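+
+ // Example usage (illustrative):
+ //     int* p = new int(42);
+ //     eastl::default_delete<int>()(p);        // invokes: delete p
+ //
+ //     int* pArray = new int[8];
+ //     eastl::default_delete<int[]>()(pArray); // invokes: delete[] pArray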
+
+
+
+
+ /// smart_ptr_deleter
+ ///
+ /// Deprecated in favor of the C++11 name: default_delete
+ ///
+ template <typename T>
+ struct smart_ptr_deleter
+ {
+ typedef T value_type;
+
+ void operator()(const value_type* p) const // We use a const argument type in order to be most flexible with what types we accept.
+ { delete const_cast<value_type*>(p); }
+ };
+
+ template <>
+ struct smart_ptr_deleter<void>
+ {
+ typedef void value_type;
+
+ void operator()(const void* p) const
+ { delete[] (char*)p; } // We don't seem to have much choice but to cast to a scalar type.
+ };
+
+ template <>
+ struct smart_ptr_deleter<const void>
+ {
+ typedef void value_type;
+
+ void operator()(const void* p) const
+ { delete[] (char*)p; } // We don't seem to have much choice but to cast to a scalar type.
+ };
+
+
+
+ /// smart_array_deleter
+ ///
+ /// Deprecated in favor of the C++11 name: default_delete
+ ///
+ template <typename T>
+ struct smart_array_deleter
+ {
+ typedef T value_type;
+
+ void operator()(const value_type* p) const // We use a const argument type in order to be most flexible with what types we accept.
+ { delete[] const_cast<value_type*>(p); }
+ };
+
+ template <>
+ struct smart_array_deleter<void>
+ {
+ typedef void value_type;
+
+ void operator()(const void* p) const
+ { delete[] (char*)p; } // We don't seem to have much choice but to cast to a scalar type.
+ };
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/include/EASTL/internal/thread_support.h b/include/EASTL/internal/thread_support.h
new file mode 100644
index 0000000..80386d2
--- /dev/null
+++ b/include/EASTL/internal/thread_support.h
@@ -0,0 +1,244 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_THREAD_SUPPORT_H
+#define EASTL_INTERNAL_THREAD_SUPPORT_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+#include <EASTL/internal/config.h>
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+// NOTE(rparolin): We need a fallback mutex implementation because the Microsoft implementation
+// of std::mutex cannot be included in managed-cpp code.
+//
+// fatal error C1189: <mutex> is not supported when compiling with /clr or /clr:pure
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+#if defined(EA_HAVE_CPP11_MUTEX) && !defined(EA_COMPILER_MANAGED_CPP)
+ #define EASTL_CPP11_MUTEX_ENABLED 1
+#else
+ #define EASTL_CPP11_MUTEX_ENABLED 0
+#endif
+
+#if EASTL_CPP11_MUTEX_ENABLED
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include <mutex>
+ EA_RESTORE_ALL_VC_WARNINGS()
+#endif
+
+#if defined(EA_PLATFORM_MICROSOFT)
+ // Cannot include Windows headers in our headers, as they kill builds with their #defines.
+#elif defined(EA_PLATFORM_POSIX)
+ #include <pthread.h>
+#endif
+
+// copy constructor could not be generated because a base class copy constructor is inaccessible or deleted.
+// assignment operator could not be generated because a base class assignment operator is inaccessible or deleted.
+// non dll-interface class used as base for DLL-interface classkey 'identifier'.
+EA_DISABLE_VC_WARNING(4625 4626 4275);
+
+
+#if defined(EA_PLATFORM_MICROSOFT)
+ #if defined(EA_PROCESSOR_POWERPC)
+ extern "C" long __stdcall _InterlockedIncrement(long volatile* Addend);
+ #pragma intrinsic (_InterlockedIncrement)
+
+ extern "C" long __stdcall _InterlockedDecrement(long volatile* Addend);
+ #pragma intrinsic (_InterlockedDecrement)
+
+ extern "C" long __stdcall _InterlockedCompareExchange(long volatile* Dest, long Exchange, long Comp);
+ #pragma intrinsic (_InterlockedCompareExchange)
+ #else
+ extern "C" long _InterlockedIncrement(long volatile* Addend);
+ #pragma intrinsic (_InterlockedIncrement)
+
+ extern "C" long _InterlockedDecrement(long volatile* Addend);
+ #pragma intrinsic (_InterlockedDecrement)
+
+ extern "C" long _InterlockedCompareExchange(long volatile* Dest, long Exchange, long Comp);
+ #pragma intrinsic (_InterlockedCompareExchange)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_THREAD_SUPPORT_AVAILABLE
+//
+// Defined as 0 or 1, based on existing support.
+// Identifies if thread support (e.g. atomics, mutexes) is available for use.
+// The large majority of EASTL doesn't use thread support, but a few parts
+// of it (e.g. shared_ptr) do.
+///////////////////////////////////////////////////////////////////////////////
+
+#if !defined(EASTL_THREAD_SUPPORT_AVAILABLE)
+ #if defined(EA_COMPILER_CLANG) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003))
+ #define EASTL_THREAD_SUPPORT_AVAILABLE 1
+ #elif defined(EA_COMPILER_MSVC)
+ #define EASTL_THREAD_SUPPORT_AVAILABLE 1
+ #else
+ #define EASTL_THREAD_SUPPORT_AVAILABLE 0
+ #endif
+#endif
+
+
+namespace eastl
+{
+ namespace Internal
+ {
+ /// atomic_increment
+ /// Returns the new value.
+ inline int32_t atomic_increment(int32_t* p32) EA_NOEXCEPT
+ {
+ #if defined(EA_COMPILER_CLANG) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003))
+ return __sync_add_and_fetch(p32, 1);
+ #elif defined(EA_COMPILER_MSVC)
+ static_assert(sizeof(long) == sizeof(int32_t), "unexpected size");
+ return _InterlockedIncrement((volatile long*)p32);
+ #elif defined(EA_COMPILER_GNUC)
+ int32_t result;
+ __asm__ __volatile__ ("lock; xaddl %0, %1"
+ : "=r" (result), "=m" (*p32)
+ : "0" (1), "m" (*p32)
+ : "memory"
+ );
+ return result + 1;
+ #else
+ EASTL_FAIL_MSG("EASTL thread safety is not implemented yet. See EAThread for how to do this for the given platform.");
+ return ++*p32;
+ #endif
+ }
+
+ /// atomic_decrement
+ /// Returns the new value.
+ inline int32_t atomic_decrement(int32_t* p32) EA_NOEXCEPT
+ {
+ #if defined(EA_COMPILER_CLANG) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003))
+ return __sync_add_and_fetch(p32, -1);
+ #elif defined(EA_COMPILER_MSVC)
+ return _InterlockedDecrement((volatile long*)p32); // volatile long cast is OK because int32_t == long on Microsoft platforms.
+ #elif defined(EA_COMPILER_GNUC)
+ int32_t result;
+ __asm__ __volatile__ ("lock; xaddl %0, %1"
+ : "=r" (result), "=m" (*p32)
+ : "0" (-1), "m" (*p32)
+ : "memory"
+ );
+ return result - 1;
+ #else
+ EASTL_FAIL_MSG("EASTL thread safety is not implemented yet. See EAThread for how to do this for the given platform.");
+ return --*p32;
+ #endif
+ }
+
+
+ /// atomic_compare_and_swap
+ /// Safely sets the value to a new value if the original value is equal to
+ /// a condition value. Returns true if the condition was met and the
+ /// assignment occurred. The comparison and value setting are done as
+ /// an atomic operation and thus another thread cannot intervene between
+ /// the two as would be the case with simple C code.
+ inline bool atomic_compare_and_swap(int32_t* p32, int32_t newValue, int32_t condition)
+ {
+ #if defined(EA_COMPILER_CLANG) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003))
+ return __sync_bool_compare_and_swap(p32, condition, newValue);
+ #elif defined(EA_COMPILER_MSVC)
+ return ((int32_t)_InterlockedCompareExchange((volatile long*)p32, (long)newValue, (long)condition) == condition);
+ #elif defined(EA_COMPILER_GNUC)
+ // GCC Inline ASM Constraints
+ // r <--> Any general purpose register
+ // a <--> The a register.
+ // 1 <--> The constraint '1' for operand 2 says that it must occupy the same location as operand 1.
+ // =a <--> output registers
+ // =r <--> output registers
+
+ int32_t result;
+ __asm__ __volatile__(
+ "lock; cmpxchgl %3, (%1) \n" // Test *p32 against EAX, if same, then *p32 = newValue
+ : "=a" (result), "=r" (p32) // outputs
+ : "a" (condition), "r" (newValue), "1" (p32) // inputs
+ : "memory" // clobbered
+ );
+ return result == condition;
+ #else
+ EASTL_FAIL_MSG("EASTL thread safety is not implemented yet. See EAThread for how to do this for the given platform.");
+ if(*p32 == condition)
+ {
+ *p32 = newValue;
+ return true;
+ }
+ return false;
+ #endif
+ }
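+
+ // Example usage (illustrative lock-free increment; 'sharedCounter' is an assumed int32_t shared between threads):
+ //     int32_t current;
+ //     do {
+ //         current = sharedCounter;
+ //     } while(!Internal::atomic_compare_and_swap(&sharedCounter, current + 1, current));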
+
+
+ // mutex
+ #if EASTL_CPP11_MUTEX_ENABLED
+ using std::mutex;
+ #else
+ class EASTL_API mutex
+ {
+ public:
+ mutex();
+ ~mutex();
+
+ void lock();
+ void unlock();
+
+ protected:
+ #if defined(EA_PLATFORM_MICROSOFT)
+ #if defined(_WIN64)
+ uint64_t mMutexBuffer[40 / sizeof(uint64_t)]; // CRITICAL_SECTION is 40 bytes on Win64.
+ #elif defined(_WIN32)
+ uint32_t mMutexBuffer[24 / sizeof(uint32_t)]; // CRITICAL_SECTION is 24 bytes on Win32.
+ #endif
+ #elif defined(EA_PLATFORM_POSIX)
+ pthread_mutex_t mMutex;
+ #endif
+ };
+ #endif
+
+
+ // auto_mutex
+ class EASTL_API auto_mutex
+ {
+ public:
+ EA_FORCE_INLINE auto_mutex(mutex& mutex) : pMutex(&mutex)
+ { pMutex->lock(); }
+
+ EA_FORCE_INLINE ~auto_mutex()
+ { pMutex->unlock(); }
+
+ protected:
+ mutex* pMutex;
+
+ auto_mutex(const auto_mutex&) = delete;
+ void operator=(const auto_mutex&) = delete;
+ };
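+
+ // Example usage (illustrative; 'gMutex' is an assumed mutex instance):
+ //     void UpdateSharedState()
+ //     {
+ //         auto_mutex lock(gMutex); // Locks gMutex on construction...
+ //         // ... modify shared state ...
+ //     }                            // ...and unlocks it when 'lock' goes out of scope.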
+
+
+ // shared_ptr_auto_mutex
+ class EASTL_API shared_ptr_auto_mutex : public auto_mutex
+ {
+ public:
+ shared_ptr_auto_mutex(const void* pSharedPtr);
+
+ shared_ptr_auto_mutex(const shared_ptr_auto_mutex&) = delete;
+ void operator=(shared_ptr_auto_mutex&&) = delete;
+ };
+
+
+ } // namespace Internal
+
+} // namespace eastl
+
+
+EA_RESTORE_VC_WARNING();
+
+
+#endif // Header include guard
diff --git a/include/EASTL/internal/tuple_fwd_decls.h b/include/EASTL/internal/tuple_fwd_decls.h
new file mode 100644
index 0000000..a2c773c
--- /dev/null
+++ b/include/EASTL/internal/tuple_fwd_decls.h
@@ -0,0 +1,56 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_TUPLE_FWD_DECLS_H
+#define EASTL_TUPLE_FWD_DECLS_H
+
+#include <EASTL/internal/config.h>
+
+#if EASTL_TUPLE_ENABLED
+
+namespace eastl
+{
+ template <typename... T>
+ class tuple;
+
+ template <typename Tuple>
+ class tuple_size;
+
+ template <size_t I, typename Tuple>
+ class tuple_element;
+
+ template <size_t I, typename Tuple>
+ using tuple_element_t = typename tuple_element<I, Tuple>::type;
+
+ // const version of tuple_element_t, for when tuple or TupleImpl cannot itself be const
+ template <size_t I, typename Tuple>
+ using const_tuple_element_t = typename conditional<
+ is_lvalue_reference<tuple_element_t<I, Tuple>>::value,
+ add_lvalue_reference_t<const remove_reference_t<tuple_element_t<I, Tuple>>>,
+ const tuple_element_t<I, Tuple>
+ >::type;
+
+ // get
+ template <size_t I, typename... Ts_>
+ tuple_element_t<I, tuple<Ts_...>>& get(tuple<Ts_...>& t);
+
+ template <size_t I, typename... Ts_>
+ const_tuple_element_t<I, tuple<Ts_...>>& get(const tuple<Ts_...>& t);
+
+ template <size_t I, typename... Ts_>
+ tuple_element_t<I, tuple<Ts_...>>&& get(tuple<Ts_...>&& t);
+
+ template <typename T, typename... ts_>
+ T& get(tuple<ts_...>& t);
+
+ template <typename T, typename... ts_>
+ const T& get(const tuple<ts_...>& t);
+
+ template <typename T, typename... ts_>
+ T&& get(tuple<ts_...>&& t);
+}
+
+#endif // EASTL_TUPLE_ENABLED
+
+#endif // EASTL_TUPLE_FWD_DECLS_H
diff --git a/include/EASTL/internal/type_compound.h b/include/EASTL/internal/type_compound.h
new file mode 100644
index 0000000..178a734
--- /dev/null
+++ b/include/EASTL/internal/type_compound.h
@@ -0,0 +1,800 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_COMPOUND_H
+#define EASTL_INTERNAL_TYPE_COMPOUND_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+
+// Until we revise the code below to handle EDG warnings, we don't have much choice but to disable them.
+#if defined(__EDG_VERSION__)
+ #pragma diag_suppress=1931 // operand of sizeof is not a type, variable, or dereferenced pointer expression
+#endif
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////
+ // extent
+ //
+ // extent<T, I>::value is an integral type representing the number of
+ // elements in the Ith dimension of array type T.
+ //
+ // For a given array type T[N], extent<T[N]>::value == N.
+ // For a given multi-dimensional array type T[M][N], extent<T[M][N], 0>::value == M.
+ // For a given multi-dimensional array type T[M][N], extent<T[M][N], 1>::value == N.
+ // For a given array type T and a given dimension I where I >= rank<T>::value, extent<T, I>::value == 0.
+ // For a given array type of unknown extent T[], extent<T[], 0>::value == 0.
+ // For a given non-array type T and an arbitrary dimension I, extent<T, I>::value == 0.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_extent_CONFORMANCE 1 // extent is conforming.
+
+ template<typename T, unsigned N>
+ struct extent_help : public eastl::integral_constant<size_t, 0> {};
+
+ template<typename T, unsigned I>
+ struct extent_help<T[I], 0> : public eastl::integral_constant<size_t, I> {};
+
+ template<typename T, unsigned N, unsigned I>
+ struct extent_help<T[I], N> : public eastl::extent_help<T, N - 1> { };
+
+ template<typename T, unsigned N>
+ struct extent_help<T[], N> : public eastl::extent_help<T, N - 1> {};
+
+ template<typename T, unsigned N = 0> // extent uses unsigned instead of size_t.
+ struct extent : public eastl::extent_help<T, N> { };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T, unsigned N = 0>
+ EA_CONSTEXPR auto extent_v = extent<T, N>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_array
+ //
+ // is_array<T>::value == true if and only if T is an array type,
+ // including unbounded array types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_array_CONFORMANCE 1 // is_array is conforming; doesn't make mistakes.
+
+ template<typename T>
+ struct is_array : public eastl::false_type {};
+
+ template<typename T>
+ struct is_array<T[]> : public eastl::true_type {};
+
+ template<typename T, size_t N>
+ struct is_array<T[N]> : public eastl::true_type {};
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template<typename T>
+ EA_CONSTEXPR bool is_array_v = is_array<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_array_of_known_bounds
+ //
+ // Not part of the C++11 Standard.
+ // is_array_of_known_bounds<T>::value is true if T is an array and is
+ // of known bounds. is_array_of_known_bounds<int[3]>::value == true,
+ // while is_array_of_known_bounds<int[]>::value == false.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ template<typename T>
+ struct is_array_of_known_bounds
+ : public eastl::integral_constant<bool, eastl::extent<T>::value != 0> {};
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_array_of_unknown_bounds
+ //
+ // Not part of the C++11 Standard.
+ // is_array_of_unknown_bounds<T>::value is true if T is an array but is
+ // of unknown bounds. is_array_of_unknown_bounds<int[3]>::value == false,
+ // while is_array_of_unknown_bounds<int[]>::value == true.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ template<typename T>
+ struct is_array_of_unknown_bounds
+ : public eastl::integral_constant<bool, eastl::is_array<T>::value && (eastl::extent<T>::value == 0)> {};
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_member_function_pointer
+ //
+ // is_member_function_pointer<T>::value == true if and only if T is a
+ // pointer to member function type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ // We detect member functions with 0 to N arguments. We can extend this
+ // for additional arguments if necessary.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_member_function_pointer_CONFORMANCE 1 // is_member_function_pointer is conforming; doesn't make mistakes.
+
+ // To do: Revise this to support C++11 variadic templates when possible.
+ // To do: We can probably also use remove_cv to simplify the multitude of types below.
+
+ template <typename T> struct is_mem_fun_pointer_value : public false_type{};
+
+ template <typename R, typename T> struct is_mem_fun_pointer_value<R (T::*)()> : public true_type{};
+ template <typename R, typename T> struct is_mem_fun_pointer_value<R (T::*)() const> : public true_type{};
+ template <typename R, typename T> struct is_mem_fun_pointer_value<R (T::*)() volatile> : public true_type{};
+ template <typename R, typename T> struct is_mem_fun_pointer_value<R (T::*)() const volatile> : public true_type{};
+
+ template <typename R, typename T, typename Arg0> struct is_mem_fun_pointer_value<R (T::*)(Arg0)> : public true_type{};
+ template <typename R, typename T, typename Arg0> struct is_mem_fun_pointer_value<R (T::*)(Arg0) const> : public true_type{};
+ template <typename R, typename T, typename Arg0> struct is_mem_fun_pointer_value<R (T::*)(Arg0) volatile> : public true_type{};
+ template <typename R, typename T, typename Arg0> struct is_mem_fun_pointer_value<R (T::*)(Arg0) const volatile> : public true_type{};
+
+ template <typename R, typename T, typename Arg0, typename Arg1> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1) volatile> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1) const volatile> : public true_type{};
+
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2) volatile> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2) const volatile> : public true_type{};
+
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3) volatile> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3) const volatile> : public true_type{};
+
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4) volatile> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4) const volatile> : public true_type{};
+
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5) volatile> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5) const volatile> : public true_type{};
+
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6) volatile> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6) const volatile> : public true_type{};
+
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6, typename Arg7> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6, Arg7)> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6, typename Arg7> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6, Arg7) const> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6, typename Arg7> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6, Arg7) volatile> : public true_type{};
+ template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6, typename Arg7> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6, Arg7) const volatile> : public true_type{};
+
+ template <typename T>
+ struct is_member_function_pointer : public integral_constant<bool, is_mem_fun_pointer_value<T>::value>{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_member_function_pointer_v = is_member_function_pointer<T>::value;
+ #endif
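+
+ // Example usage (illustrative; MyClass is a hypothetical class):
+ //     is_member_function_pointer<void (MyClass::*)(int)>::value => true
+ //     is_member_function_pointer<int MyClass::*>::value         => false  (pointer to data member)
+ //     is_member_function_pointer<void (*)(int)>::value          => false  (non-member function pointer)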
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_member_pointer
+ //
+ // is_member_pointer<T>::value == true if and only if:
+ // is_member_object_pointer<T>::value == true, or
+ // is_member_function_pointer<T>::value == true
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_member_pointer_CONFORMANCE 1 // is_member_pointer is conforming; doesn't make mistakes.
+
+ template <typename T>
+ struct is_member_pointer
+ : public eastl::integral_constant<bool, eastl::is_member_function_pointer<T>::value>{};
+
+ template <typename T, typename U>
+ struct is_member_pointer<U T::*>
+ : public eastl::true_type{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_member_pointer_v = is_member_pointer<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_member_object_pointer
+ //
+ // is_member_object_pointer<T>::value == true if and only if T is a
+ // pointer to data member type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_member_object_pointer_CONFORMANCE 1 // is_member_object_pointer is conforming; doesn't make mistakes.
+
+ template<typename T>
+ struct is_member_object_pointer : public eastl::integral_constant<bool,
+ eastl::is_member_pointer<T>::value &&
+ !eastl::is_member_function_pointer<T>::value
+ > {};
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_member_object_pointer_v = is_member_object_pointer<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_pointer
+ //
+ // is_pointer<T>::value == true if and only if T is a pointer type.
+ // This category includes function pointer types, but not pointer to
+ // member types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_pointer_CONFORMANCE 1 // is_pointer is conforming; doesn't make mistakes.
+
+ template <typename T> struct is_pointer_helper : public false_type{};
+
+ template <typename T> struct is_pointer_helper<T*> : public true_type{};
+ template <typename T> struct is_pointer_helper<T* const> : public true_type{};
+ template <typename T> struct is_pointer_helper<T* volatile> : public true_type{};
+ template <typename T> struct is_pointer_helper<T* const volatile> : public true_type{};
+
+ template <typename T>
+ struct is_pointer_value : public type_and<is_pointer_helper<T>::value, type_not<is_member_pointer<T>::value>::value> {};
+
+ template <typename T>
+ struct is_pointer : public integral_constant<bool, is_pointer_value<T>::value>{};
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template<typename T>
+ EA_CONSTEXPR bool is_pointer_v = is_pointer<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_convertible
+ //
+ // Given two (possibly identical) types From and To, is_convertible<From, To>::value == true
+ // if and only if an lvalue of type From can be implicitly converted to type To,
+ // or is_void<To>::value == true
+ //
+ // An instance of the type predicate holds true if the expression To to = from;, where from is an object of type From, is well-formed.
+ //
+ // is_convertible may only be applied to complete types.
+ // Type To may not be an abstract type.
+ // If the conversion is ambiguous, the program is ill-formed.
+ // If either or both of From and To are class types, and the conversion would invoke
+ // non-public member functions of either From or To (such as a private constructor of To,
+ // or a private conversion operator of From), the program is ill-formed.
+ //
+ // Note that without compiler help, both is_convertible and is_base
+ // can produce compiler errors if the conversion is ambiguous.
+ // Example:
+ // struct A {};
+ // struct B : A {};
+ // struct C : A {};
+ // struct D : B, C {};
+ // is_convertible<D*, A*>::value; // Generates compiler error.
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_convertible_to)))
+ #define EASTL_TYPE_TRAIT_is_convertible_CONFORMANCE 1 // is_convertible is conforming.
+
+ // Problem: VC++ reports that int is convertible to short, yet if you construct a short from an int then VC++ generates a warning:
+ // warning C4242: 'initializing' : conversion from 'int' to 'short', possible loss of data. We can deal with this by making
+ // is_convertible be false for conversions that could result in loss of data. Or we could make another trait called is_lossless_convertible
+ // and use that appropriately in our code. Or we could put the onus on the user to work around such warnings.
+ template <typename From, typename To>
+ struct is_convertible : public integral_constant<bool, __is_convertible_to(From, To)>{};
+
+ #else
+ #define EASTL_TYPE_TRAIT_is_convertible_CONFORMANCE 1
+
+ template<typename From, typename To, bool = eastl::is_void<From>::value || eastl::is_function<To>::value || eastl::is_array<To>::value >
+ struct is_convertible_helper // Anything is convertible to void. Nothing is convertible to a function or an array.
+ { static const bool value = eastl::is_void<To>::value; };
+
+ template<typename From, typename To>
+ class is_convertible_helper<From, To, false>
+ {
+ template<typename To1>
+ static void ToFunction(To1); // We try to call this function with an instance of From. It is valid if From can be converted to To.
+
+ template<typename /*From1*/, typename /*To1*/>
+ static eastl::no_type is(...);
+
+ template<typename From1, typename To1>
+ static decltype(ToFunction<To1>(eastl::declval<From1>()), eastl::yes_type()) is(int);
+
+ public:
+ static const bool value = sizeof(is<From, To>(0)) == 1;
+ };
+
+ template<typename From, typename To>
+ struct is_convertible
+ : public integral_constant<bool, is_convertible_helper<From, To>::value> {};
+
+ #endif
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template<typename From, typename To>
+ EA_CONSTEXPR bool is_convertible_v = is_convertible<From, To>::value;
+ #endif
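+
+ // Example usage (illustrative):
+ //     is_convertible<int, double>::value  => true
+ //     is_convertible<int*, void*>::value  => true
+ //     is_convertible<void*, int*>::value  => false
+ //     is_convertible<double, int*>::value => false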
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_convertible
+ //
+ // https://en.cppreference.com/w/cpp/types/is_convertible
+ //
+ // template<typename From, typename To>
+ // struct is_explicitly_convertible
+ // : public is_constructible<To, From> {};
+ ///////////////////////////////////////////////////////////////////////
+ // TODO(rparolin): implement type-trait
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_explicitly_convertible
+ //
+ // This sometime-seen extension trait is the same as is_constructible
+ // and so we don't define it.
+ //
+ // template<typename From, typename To>
+ // struct is_explicitly_convertible
+ // : public is_constructible<To, From> {};
+ ///////////////////////////////////////////////////////////////////////
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_union
+ //
+ // is_union<T>::value == true if and only if T is a union type.
+ //
+ // There is no way to tell if a type is a union without compiler help.
+ // As of this writing, only Metrowerks v8+ supports such functionality
+ // via 'msl::is_union<T>::value'. The user can force something to be
+ // evaluated as a union via EASTL_DECLARE_UNION.
+ ///////////////////////////////////////////////////////////////////////
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_union)))
+ #define EASTL_TYPE_TRAIT_is_union_CONFORMANCE 1 // is_union is conforming.
+
+ template <typename T>
+ struct is_union : public integral_constant<bool, __is_union(T)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_union_CONFORMANCE 0 // is_union is not fully conforming.
+
+ template <typename T> struct is_union : public false_type{};
+ #endif
+
+ #define EASTL_DECLARE_UNION(T) namespace eastl{ template <> struct is_union<T> : public true_type{}; template <> struct is_union<const T> : public true_type{}; }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_union_v = is_union<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_class
+ //
+ // is_class<T>::value == true if and only if T is a class or struct
+ // type (and not a union type).
+ //
+ // Without specific compiler help, it is not possible to
+ // distinguish between unions and classes. As a result, is_class
+ // will erroneously evaluate to true for union types.
+ ///////////////////////////////////////////////////////////////////////
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_class)))
+ #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE 1 // is_class is conforming.
+
+ template <typename T>
+ struct is_class : public integral_constant<bool, __is_class(T)>{};
+ #elif defined(__EDG__)
+ #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE EASTL_TYPE_TRAIT_is_union_CONFORMANCE
+
+ typedef char yes_array_type[1];
+ typedef char no_array_type[2];
+ template <typename U> static yes_array_type& is_class_helper(void (U::*)());
+ template <typename U> static no_array_type& is_class_helper(...);
+
+ template <typename T>
+ struct is_class : public integral_constant<bool,
+ sizeof(is_class_helper<T>(0)) == sizeof(yes_array_type) && !is_union<T>::value
+ >{};
+ #elif !defined(__GNUC__) || (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) // Not GCC or GCC 3.4+
+ #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE EASTL_TYPE_TRAIT_is_union_CONFORMANCE
+
+ template <typename U> static yes_type is_class_helper(void (U::*)());
+ template <typename U> static no_type is_class_helper(...);
+
+ template <typename T>
+ struct is_class : public integral_constant<bool,
+ sizeof(is_class_helper<T>(0)) == sizeof(yes_type) && !is_union<T>::value
+ >{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE 0 // is_class is not fully conforming.
+
+ // GCC 2.x version, due to GCC being broken.
+ template <typename T>
+ struct is_class : public false_type{};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_class_v = is_class<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_enum
+ //
+ // is_enum<T>::value == true if and only if T is an enumeration type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_enum)))
+ #define EASTL_TYPE_TRAIT_is_enum_CONFORMANCE 1 // is_enum is conforming.
+
+ template <typename T>
+ struct is_enum : public integral_constant<bool, __is_enum(T)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_enum_CONFORMANCE 1 // is_enum is conforming.
+
+ struct int_convertible{ int_convertible(int); };
+
+ template <bool is_arithmetic_or_reference>
+ struct is_enum_helper { template <typename T> struct nest : public is_convertible<T, int_convertible>{}; };
+
+ template <>
+ struct is_enum_helper<true> { template <typename T> struct nest : public false_type {}; };
+
+ template <typename T>
+ struct is_enum_helper2
+ {
+ typedef type_or<is_arithmetic<T>::value, is_reference<T>::value, is_class<T>::value> selector;
+ typedef is_enum_helper<selector::value> helper_t;
+ typedef typename add_reference<T>::type ref_t;
+ typedef typename helper_t::template nest<ref_t> result;
+ };
+
+ template <typename T>
+ struct is_enum : public integral_constant<bool, is_enum_helper2<T>::result::value>{};
+
+ template <> struct is_enum<void> : public false_type {};
+ template <> struct is_enum<void const> : public false_type {};
+ template <> struct is_enum<void volatile> : public false_type {};
+ template <> struct is_enum<void const volatile> : public false_type {};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_enum_v = is_enum<T>::value;
+ #endif
+
+ #define EASTL_DECLARE_ENUM(T) namespace eastl{ template <> struct is_enum<T> : public true_type{}; template <> struct is_enum<const T> : public true_type{}; }
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_polymorphic
+ //
+ // is_polymorphic<T>::value == true if and only if T is a class or struct
+ // that declares or inherits a virtual function. is_polymorphic may only
+ // be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_polymorphic)))
+ #define EASTL_TYPE_TRAIT_is_polymorphic_CONFORMANCE 1 // is_polymorphic is conforming.
+
+ template <typename T>
+ struct is_polymorphic : public integral_constant<bool, __is_polymorphic(T)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_polymorphic_CONFORMANCE 1 // is_polymorphic is conforming.
+
+ template <typename T>
+ struct is_polymorphic_imp1
+ {
+ typedef typename remove_cv<T>::type t;
+
+ struct helper_1 : public t
+ {
+ helper_1();
+ ~helper_1() throw();
+ char pad[64];
+ };
+
+ struct helper_2 : public t
+ {
+ helper_2();
+ virtual ~helper_2() throw();
+ #ifndef _MSC_VER
+ virtual void foo();
+ #endif
+ char pad[64];
+ };
+
+ static const bool value = (sizeof(helper_1) == sizeof(helper_2));
+ };
+
+ template <typename T>
+ struct is_polymorphic_imp2{ static const bool value = false; };
+
+ template <bool is_class>
+ struct is_polymorphic_selector{ template <typename T> struct rebind{ typedef is_polymorphic_imp2<T> type; }; };
+
+ template <>
+ struct is_polymorphic_selector<true>{ template <typename T> struct rebind{ typedef is_polymorphic_imp1<T> type; }; };
+
+ template <typename T>
+ struct is_polymorphic_value{
+ typedef is_polymorphic_selector<is_class<T>::value> selector;
+ typedef typename selector::template rebind<T> binder;
+ typedef typename binder::type imp_type;
+ static const bool value = imp_type::value;
+ };
+
+ template <typename T>
+ struct is_polymorphic : public integral_constant<bool, is_polymorphic_value<T>::value>{};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_polymorphic_v = is_polymorphic<T>::value;
+ #endif
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_object
+ //
+ // is_object<T>::value == true if and only if:
+ // is_reference<T>::value == false, and
+ // is_function<T>::value == false, and
+ // is_void<T>::value == false
+ //
+ // The C++ standard, section 3.9p9, states: "An object type is a
+ // (possibly cv-qualified) type that is not a function type, not a
+ // reference type, and not incomplete (except for an incompletely
+ // defined object type)."
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_object_CONFORMANCE (EASTL_TYPE_TRAIT_is_reference_CONFORMANCE && EASTL_TYPE_TRAIT_is_void_CONFORMANCE && EASTL_TYPE_TRAIT_is_function_CONFORMANCE)
+
+ template <typename T>
+ struct is_object : public integral_constant<bool,
+ !is_reference<T>::value && !is_void<T>::value && !is_function<T>::value
+ >{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_object_v = is_object<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_scalar
+ //
+ // is_scalar<T>::value == true if and only if:
+ // is_arithmetic<T>::value == true, or
+ // is_enum<T>::value == true, or
+ // is_pointer<T>::value == true, or
+ // is_member_pointer<T>::value == true, or
+ // is_null_pointer<T>::value == true
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_scalar_CONFORMANCE 1 // is_scalar is conforming.
+
+ template <typename T>
+ struct is_scalar : public integral_constant<bool,
+ is_arithmetic<T>::value || is_enum<T>::value || is_pointer<T>::value ||
+ is_member_pointer<T>::value ||
+ is_null_pointer<T>::value> {};
+
+ template <typename T> struct is_scalar<T*> : public true_type {};
+ template <typename T> struct is_scalar<T* const> : public true_type {};
+ template <typename T> struct is_scalar<T* volatile> : public true_type {};
+ template <typename T> struct is_scalar<T* const volatile> : public true_type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_scalar_v = is_scalar<T>::value;
+ #endif
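+
+ // Example usage (an illustrative sketch; 'Color' is a hypothetical enum):
+ //     enum Color { kRed, kGreen };
+ //
+ //     static_assert( eastl::is_scalar<int>::value,               "arithmetic types are scalars");
+ //     static_assert( eastl::is_scalar<Color>::value,             "enums are scalars");
+ //     static_assert( eastl::is_scalar<const char*>::value,       "pointers are scalars");
+ //     static_assert( eastl::is_scalar<decltype(nullptr)>::value, "nullptr_t is a scalar");
+ //     static_assert(!eastl::is_scalar<int&>::value,              "references are not scalars");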
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_compound
+ //
+ // Compound means anything but fundamental. See C++ standard, section 3.9.2.
+ //
+ // is_compound<T>::value == true if and only if:
+ // is_fundamental<T>::value == false
+ //
+ // Thus, is_compound<T>::value == true if and only if:
+ // is_floating_point<T>::value == false, and
+ // is_integral<T>::value == false, and
+ // is_void<T>::value == false
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_compound_CONFORMANCE EASTL_TYPE_TRAIT_is_fundamental_CONFORMANCE
+
+ template <typename T>
+ struct is_compound : public integral_constant<bool, !is_fundamental<T>::value>{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_compound_v = is_compound<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // decay
+ //
+ // Converts the type T to its decayed equivalent. That means doing
+ // lvalue to rvalue, array to pointer, function to pointer conversions,
+ // and removal of const and volatile.
+ // This is the type conversion silently applied by the compiler to
+ // all function arguments when passed by value.
+
+ #define EASTL_TYPE_TRAIT_decay_CONFORMANCE 1 // decay is conforming.
+
+ template<typename T>
+ struct decay
+ {
+ typedef typename eastl::remove_reference<T>::type U;
+
+ typedef typename eastl::conditional<
+ eastl::is_array<U>::value,
+ typename eastl::remove_extent<U>::type*,
+ typename eastl::conditional<
+ eastl::is_function<U>::value,
+ typename eastl::add_pointer<U>::type,
+ typename eastl::remove_cv<U>::type
+ >::type
+ >::type type;
+ };
+
+
+ // decay_t is the C++14 using typedef for typename decay<T>::type, though
+ // it requires only C++11 compiler functionality to implement.
+ // We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers.
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #define EASTL_DECAY_T(T) typename decay<T>::type
+ #else
+ template<typename T>
+ using decay_t = typename decay<T>::type;
+ #define EASTL_DECAY_T(T) decay_t<T>
+ #endif
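+
+ // Example usage (an illustrative sketch):
+ //     static_assert(eastl::is_same<eastl::decay<const int&>::type, int>::value,          "const and reference are removed");
+ //     static_assert(eastl::is_same<eastl::decay<int[4]>::type, int*>::value,             "an array decays to a pointer");
+ //     static_assert(eastl::is_same<eastl::decay<int(char)>::type, int(*)(char)>::value,  "a function decays to a function pointer");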
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // common_type
+ //
+ // Determines the common type among all types T..., that is the type all T...
+ // can be implicitly converted to.
+ //
+ // It is intended that this be specialized by the user for cases where it
+ // is useful to do so. Example specialization:
+ //     template <>
+ //     struct common_type<MyClass1, MyClass2>{ typedef MyBaseClassB type; };
+ //
+ // The member typedef type shall be defined as set out in 20.9.7.6,p3. All types in
+ // the parameter pack T shall be complete or (possibly cv) void. A program may
+ // specialize this trait if at least one template parameter in the specialization
+ // is a user-defined type. Note: Such specializations are needed when only
+ // explicit conversions are desired among the template arguments.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_common_type_CONFORMANCE 1 // common_type is conforming.
+
+ template<typename... T>
+ struct common_type;
+
+ template<typename T>
+ struct common_type<T>
+ { typedef decay_t<T> type; }; // Question: Should we use T or decay_t<T> here? The C++11 Standard specifically (20.9.7.6,p3) specifies that it be without decay, but libc++ uses decay.
+
+ template<typename T, typename U>
+ struct common_type<T, U>
+ {
+ typedef decay_t<decltype(true ? declval<T>() : declval<U>())> type; // The type of a ternary expression is set by the compiler to be the common type of the two result types.
+ };
+
+ template<typename T, typename U, typename... V>
+ struct common_type<T, U, V...>
+ { typedef typename common_type<typename common_type<T, U>::type, V...>::type type; };
+
+
+ // common_type_t is the C++14 using typedef for typename common_type<T...>::type.
+ // We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers.
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #define EASTL_COMMON_TYPE_T(...) typename common_type<__VA_ARGS__>::type
+ #else
+ template <typename... T>
+ using common_type_t = typename common_type<T...>::type;
+ #define EASTL_COMMON_TYPE_T(...) common_type_t<__VA_ARGS__>
+ #endif
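+
+ // Example usage (an illustrative sketch; 'Base' and 'Derived' are hypothetical types):
+ //     struct Base {};
+ //     struct Derived : public Base {};
+ //
+ //     static_assert(eastl::is_same<eastl::common_type<char, int>::type, int>::value,         "char promotes to int");
+ //     static_assert(eastl::is_same<eastl::common_type<int, long, long>::type, long>::value,  "the pack is folded pairwise");
+ //     static_assert(eastl::is_same<eastl::common_type<Derived*, Base*>::type, Base*>::value, "pointer to base is the common type");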
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_final
+ ///////////////////////////////////////////////////////////////////////
+ #if EA_COMPILER_HAS_FEATURE(is_final)
+ template <typename T>
+ struct is_final : public integral_constant<bool, __is_final(T)> {};
+ #else
+ // no compiler support so we always return false
+ template <typename T>
+ struct is_final : public false_type {};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_final_v = is_final<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_aggregate
+ //
+ // https://en.cppreference.com/w/cpp/language/aggregate_initialization
+ //
+ // An aggregate is one of the following types:
+ // * array type
+ // * class type (typically, struct or union), that has
+ // * no private or protected non-static data members
+ //        * no user-provided, inherited, or explicit constructors
+ //          (explicitly defaulted or deleted constructors are allowed)
+ // * no virtual, private, or protected (since C++17) base classes
+ // * no virtual member functions
+ // * no default member initializers
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #if EA_COMPILER_HAS_FEATURE(is_aggregate) || defined(_MSC_VER) && (_MSC_VER >= 1916) // VS2017 15.9+
+ #define EASTL_TYPE_TRAIT_is_aggregate_CONFORMANCE 1
+
+ template <typename T>
+ struct is_aggregate : public integral_constant<bool, __is_aggregate(T)> {};
+ #else
+ #define EASTL_TYPE_TRAIT_is_aggregate_CONFORMANCE 0
+
+ // no compiler support so we always return false
+ template <typename T>
+ struct is_aggregate : public false_type {};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <typename T>
+ EA_CONSTEXPR bool is_aggregate_v = is_aggregate<T>::value;
+ #endif
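+
+ // Example usage (an illustrative sketch; 'Point' and 'Widget' are hypothetical types):
+ //     struct Point  { int x; int y; };                   // aggregate: public members, no user-provided constructors
+ //     struct Widget { Widget(int); virtual void f(); };  // not an aggregate: user-provided constructor, virtual function
+ //
+ //     static_assert( eastl::is_aggregate_v<Point>,  "can be aggregate-initialized, e.g. Point{1, 2}");
+ //     static_assert( eastl::is_aggregate_v<int[4]>, "array types are aggregates");
+ //     static_assert(!eastl::is_aggregate_v<Widget>, "has a user-provided constructor");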
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
diff --git a/include/EASTL/internal/type_fundamental.h b/include/EASTL/internal/type_fundamental.h
new file mode 100644
index 0000000..950d15e
--- /dev/null
+++ b/include/EASTL/internal/type_fundamental.h
@@ -0,0 +1,289 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_FUNDAMENTAL_H
+#define EASTL_INTERNAL_TYPE_FUNDAMENTAL_H
+
+
+#include <EABase/eabase.h>
+#include <EABase/nullptr.h>
+
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+namespace eastl
+{
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_void
+ //
+ // is_void<T>::value == true if and only if T is one of the following types:
+ // [const][volatile] void
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_void_CONFORMANCE 1 // is_void is conforming.
+
+ template <typename T> struct is_void : public false_type{};
+
+ template <> struct is_void<void> : public true_type{};
+ template <> struct is_void<void const> : public true_type{};
+ template <> struct is_void<void volatile> : public true_type{};
+ template <> struct is_void<void const volatile> : public true_type{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_void_v = is_void<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_void_arg
+ //
+ // utility which identifies if any of the given template arguments is void.
+ //
+ // TODO(rparolin): refactor with fold expressions when C++17 compilers are widely available.
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename ...Args>
+ struct has_void_arg;
+
+ template <>
+ struct has_void_arg<>
+ : public eastl::false_type {};
+
+ template <typename A0, typename ...Args>
+ struct has_void_arg<A0, Args...>
+ { static const bool value = (eastl::is_void<A0>::value || eastl::has_void_arg<Args...>::value); };
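+
+ // Example usage (an illustrative sketch):
+ //     static_assert( eastl::has_void_arg<int, void, float>::value, "void appears in the pack");
+ //     static_assert(!eastl::has_void_arg<int, char, float>::value, "no void in the pack");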
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_null_pointer
+ //
+ // C++14 type trait. Refers only to nullptr_t and not NULL (0).
+ //    eastl::is_null_pointer<decltype(nullptr)>::value == true
+ // eastl::is_null_pointer<std::nullptr_t>::value == true
+ // eastl::is_null_pointer<void*>::value == false
+ // eastl::is_null_pointer<NULL>::value == [cannot compile]
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(EA_COMPILER_CPP11_ENABLED) && !defined(EA_COMPILER_NO_DECLTYPE) && !defined(_MSC_VER) // VC++'s handling of decltype(nullptr) is broken.
+ #define EASTL_TYPE_TRAIT_is_null_pointer_CONFORMANCE 1
+
+ template <typename T>
+ struct is_null_pointer : public eastl::is_same<typename eastl::remove_cv<T>::type, decltype(nullptr)> {}; // A C++11 compiler defines nullptr, but you need a C++11 standard library to declare std::nullptr_t. So it's safer to compare against decltype(nullptr) than to use std::nullptr_t, because we may have a C++11 compiler but C++98 library (happens with Apple frequently).
+ #else
+ #define EASTL_TYPE_TRAIT_is_null_pointer_CONFORMANCE 1
+
+ template <typename T>
+ struct is_null_pointer : public eastl::is_same<typename eastl::remove_cv<T>::type, std::nullptr_t> {};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_null_pointer_v = is_null_pointer<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_integral
+ //
+ // is_integral<T>::value == true if and only if T is one of the following types:
+ // [const] [volatile] bool
+ // [const] [volatile] char
+ // [const] [volatile] signed char
+ // [const] [volatile] unsigned char
+ // [const] [volatile] wchar_t
+ // [const] [volatile] short
+ // [const] [volatile] int
+ // [const] [volatile] long
+ // [const] [volatile] long long
+ // [const] [volatile] unsigned short
+ // [const] [volatile] unsigned int
+ // [const] [volatile] unsigned long
+ // [const] [volatile] unsigned long long
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_integral_CONFORMANCE 1 // is_integral is conforming.
+
+ template <typename T> struct is_integral_helper : public false_type{};
+
+ template <> struct is_integral_helper<unsigned char> : public true_type{};
+ template <> struct is_integral_helper<unsigned short> : public true_type{};
+ template <> struct is_integral_helper<unsigned int> : public true_type{};
+ template <> struct is_integral_helper<unsigned long> : public true_type{};
+ template <> struct is_integral_helper<unsigned long long> : public true_type{};
+
+ template <> struct is_integral_helper<signed char> : public true_type{};
+ template <> struct is_integral_helper<signed short> : public true_type{};
+ template <> struct is_integral_helper<signed int> : public true_type{};
+ template <> struct is_integral_helper<signed long> : public true_type{};
+ template <> struct is_integral_helper<signed long long> : public true_type{};
+
+ template <> struct is_integral_helper<bool> : public true_type{};
+ template <> struct is_integral_helper<char> : public true_type{};
+ #if defined(EA_CHAR16_NATIVE) && EA_CHAR16_NATIVE
+ template <> struct is_integral_helper<char16_t> : public true_type{};
+ #endif
+ #if defined(EA_CHAR32_NATIVE) && EA_CHAR32_NATIVE
+ template <> struct is_integral_helper<char32_t> : public true_type{};
+ #endif
+ #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type which is already handled above...
+ template <> struct is_integral_helper<wchar_t> : public true_type{};
+ #endif
+ #if EASTL_INT128_SUPPORTED && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ template <> struct is_integral_helper<__int128_t> : public true_type{};
+ template <> struct is_integral_helper<__uint128_t> : public true_type{};
+ #endif
+
+ template <typename T>
+ struct is_integral : public eastl::is_integral_helper<typename eastl::remove_cv<T>::type>{};
+
+ #define EASTL_DECLARE_INTEGRAL(T) \
+ namespace eastl{ \
+ template <> struct is_integral<T> : public true_type{}; \
+ template <> struct is_integral<const T> : public true_type{}; \
+ template <> struct is_integral<volatile T> : public true_type{}; \
+ template <> struct is_integral<const volatile T> : public true_type{}; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_integral_v = is_integral<T>::value;
+ #endif
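+
+ // Example usage of the declaration macro (an illustrative sketch; 'int128_sim' is a
+ // hypothetical user-defined integer-like type). The macro must be used at global scope:
+ //     struct int128_sim { uint64_t lo; uint64_t hi; };
+ //     EASTL_DECLARE_INTEGRAL(int128_sim)
+ //     static_assert(eastl::is_integral<const volatile int128_sim>::value, "all cv-qualified variants are registered");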
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_floating_point
+ //
+ // is_floating_point<T>::value == true if and only if T is one of the following types:
+ // [const] [volatile] float
+ // [const] [volatile] double
+ // [const] [volatile] long double
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_floating_point_CONFORMANCE 1 // is_floating_point is conforming.
+
+ template <typename T> struct is_floating_point_helper : public false_type{};
+
+ template <> struct is_floating_point_helper<float> : public true_type{};
+ template <> struct is_floating_point_helper<double> : public true_type{};
+ template <> struct is_floating_point_helper<long double> : public true_type{};
+
+ template <typename T>
+ struct is_floating_point : public eastl::is_floating_point_helper<typename eastl::remove_cv<T>::type>{};
+
+ #define EASTL_DECLARE_FLOATING_POINT(T) \
+ namespace eastl{ \
+ template <> struct is_floating_point<T> : public true_type{}; \
+ template <> struct is_floating_point<const T> : public true_type{}; \
+ template <> struct is_floating_point<volatile T> : public true_type{}; \
+ template <> struct is_floating_point<const volatile T> : public true_type{}; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_floating_point_v = is_floating_point<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_arithmetic
+ //
+ // is_arithmetic<T>::value == true if and only if:
+ // is_floating_point<T>::value == true, or
+ // is_integral<T>::value == true
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_arithmetic_CONFORMANCE 1 // is_arithmetic is conforming.
+
+ template <typename T>
+ struct is_arithmetic
+ : public integral_constant<bool, is_integral<T>::value || is_floating_point<T>::value> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_arithmetic_v = is_arithmetic<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_fundamental
+ //
+ // is_fundamental<T>::value == true if and only if:
+ // is_floating_point<T>::value == true, or
+ // is_integral<T>::value == true, or
+ // is_void<T>::value == true
+ // is_null_pointer<T>::value == true
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_fundamental_CONFORMANCE 1 // is_fundamental is conforming.
+
+ template <typename T>
+ struct is_fundamental
+ : public bool_constant<is_void_v<T> || is_integral_v<T> || is_floating_point_v<T> || is_null_pointer_v<T>> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_fundamental_v = is_fundamental<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_hat_type
+ //
+ // is_hat_type<T>::value == true if and only if:
+ // underlying type is a C++/CX '^' type such as: Foo^
+ // meaning the type is heap allocated and ref-counted
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T> struct is_hat_type_helper : public false_type {};
+
+ #if (EABASE_VERSION_N > 20607 && defined(EA_COMPILER_WINRTCX_ENABLED)) || defined(__cplusplus_winrt)
+ template <typename T> struct is_hat_type_helper<T^> : public true_type{};
+ #endif
+
+ template <typename T>
+ struct is_hat_type : public eastl::is_hat_type_helper<T> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_hat_type_v = is_hat_type<T>::value;
+ #endif
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/include/EASTL/internal/type_pod.h b/include/EASTL/internal/type_pod.h
new file mode 100644
index 0000000..8726a7e
--- /dev/null
+++ b/include/EASTL/internal/type_pod.h
@@ -0,0 +1,1945 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_POD_H
+#define EASTL_INTERNAL_TYPE_POD_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <limits.h>
+#include <EASTL/type_traits.h>
+
+namespace eastl
+{
+ ///////////////////////////////////////////////////////////////////////
+ // is_empty
+ //
+ // is_empty<T>::value == true if and only if T is an empty class or struct.
+ // is_empty may only be applied to complete types.
+ //
+ // is_empty cannot be used with union types until is_union can be made to work.
+ ///////////////////////////////////////////////////////////////////////
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_empty)))
+ #define EASTL_TYPE_TRAIT_is_empty_CONFORMANCE 1 // is_empty is conforming.
+
+ template <typename T>
+ struct is_empty : public integral_constant<bool, __is_empty(T)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_empty_CONFORMANCE 1 // is_empty is fully conforming.
+
+ template <typename T>
+ struct is_empty_helper_t1 : public T { char m[64]; };
+ struct is_empty_helper_t2 { char m[64]; };
+
+ // The inheritance in is_empty_helper_t1 will not work with non-class types
+ template <typename T, bool is_a_class = false>
+ struct is_empty_helper : public eastl::false_type{};
+
+ template <typename T>
+ struct is_empty_helper<T, true> : public eastl::integral_constant<bool,
+ sizeof(is_empty_helper_t1<T>) == sizeof(is_empty_helper_t2)
+ >{};
+
+ template <typename T>
+ struct is_empty_helper2
+ {
+ typedef typename eastl::remove_cv<T>::type _T;
+ typedef eastl::is_empty_helper<_T, eastl::is_class<_T>::value> type;
+ };
+
+ template <typename T>
+ struct is_empty : public eastl::is_empty_helper2<T>::type {};
+ #endif
+
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_empty_v = is_empty<T>::value;
+ #endif
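+
+ // Example usage (an illustrative sketch; 'Tag' and 'Counter' are hypothetical types):
+ //     struct Tag     {};
+ //     struct Counter { int mCount; };
+ //
+ //     static_assert( eastl::is_empty<Tag>::value,     "no non-static data members; eligible for the empty base optimization");
+ //     static_assert(!eastl::is_empty<Counter>::value, "has a non-static data member");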
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_pod
+ //
+ // is_pod<T>::value == true if and only if, for a given type T:
+ // - is_scalar<T>::value == true, or
+ // - T is a class or struct that has no user-defined copy assignment
+ // operator or destructor, and T has no non-static data members M for
+ // which is_pod<M>::value == false, and no members of reference type, or
+ // - T is the type of an array of objects E for which is_pod<E>::value == true
+ //
+ // is_pod may only be applied to complete types.
+ //
+ // Without some help from the compiler or user, is_pod will not report
+ // that a struct or class is a POD, but will correctly report that
+ // built-in types such as int are PODs. The user can help the compiler
+ // by using the EASTL_DECLARE_POD macro on a class.
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(EA_COMPILER_MSVC)
+ #define EASTL_TYPE_TRAIT_is_pod_CONFORMANCE 1 // is_pod is conforming. Actually as of VS2008 it is apparently not fully conforming, as it flags the following as a non-pod: struct Pod{ Pod(){} };
+
+ EA_DISABLE_VC_WARNING(4647)
+ template <typename T> // We check for has_trivial_constructor only because the VC++ is_pod does. Is it due to some compiler bug?
+ struct is_pod : public eastl::integral_constant<bool, (__has_trivial_constructor(T) && __is_pod(T) && !eastl::is_hat_type<T>::value) || eastl::is_void<T>::value || eastl::is_scalar<T>::value>{};
+ EA_RESTORE_VC_WARNING()
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_pod)))
+ #define EASTL_TYPE_TRAIT_is_pod_CONFORMANCE 1 // is_pod is conforming.
+
+ template <typename T>
+ struct is_pod : public eastl::integral_constant<bool, __is_pod(T) || eastl::is_void<T>::value || eastl::is_scalar<T>::value>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_pod_CONFORMANCE 0 // is_pod is not conforming. Can return false negatives.
+
+ template <typename T> // There's not much we can do here without some compiler extension.
+ struct is_pod : public eastl::integral_constant<bool, eastl::is_void<T>::value || eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value>{};
+ #endif
+
+ template <typename T, size_t N>
+ struct is_pod<T[N]> : public is_pod<T>{};
+
+ template <typename T>
+ struct is_POD : public is_pod<T>{}; // Backwards compatibility.
+
+ #define EASTL_DECLARE_IS_POD(T, isPod) \
+ namespace eastl { \
+ template <> struct is_pod<T> : public eastl::integral_constant<bool, isPod> { }; \
+ template <> struct is_pod<const T> : public eastl::integral_constant<bool, isPod> { }; \
+ template <> struct is_pod<volatile T> : public eastl::integral_constant<bool, isPod> { }; \
+ template <> struct is_pod<const volatile T> : public eastl::integral_constant<bool, isPod> { }; \
+ }
+
+ // Old style macro, for backwards compatibility:
+ #define EASTL_DECLARE_POD(T) namespace eastl{ template <> struct is_pod<T> : public true_type{}; template <> struct is_pod<const T> : public true_type{}; }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_pod_v = is_pod<T>::value;
+ #endif
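+
+ // Example usage of the declaration macro (an illustrative sketch; 'Vec3' is a
+ // hypothetical user-defined type). On compilers without an is_pod intrinsic this is
+ // how the user tells EASTL that a type may be treated as a POD. The macro must be
+ // used at global scope:
+ //     struct Vec3 { float x, y, z; };
+ //     EASTL_DECLARE_IS_POD(Vec3, true)
+ //     static_assert(eastl::is_pod<Vec3>::value, "containers may now use memcpy-style optimizations");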
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_standard_layout
+ //
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(EA_COMPILER_MSVC) && (_MSC_VER >= 1700)) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006)) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_standard_layout)))
+ #define EASTL_TYPE_TRAIT_is_standard_layout_CONFORMANCE 1 // is_standard_layout is conforming.
+
+ template <typename T>
+ struct is_standard_layout : public eastl::integral_constant<bool, __is_standard_layout(T) || eastl::is_void<T>::value || eastl::is_scalar<T>::value>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_standard_layout_CONFORMANCE 0 // is_standard_layout is not conforming. Can return false negatives.
+
+ template <typename T> // There's not much we can do here without some compiler extension.
+ struct is_standard_layout : public eastl::integral_constant<bool, is_void<T>::value || is_scalar<T>::value>{};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_standard_layout_v = is_standard_layout<T>::value;
+ #endif
+
+ #define EASTL_DECLARE_IS_STANDARD_LAYOUT(T, isStandardLayout) \
+ namespace eastl { \
+ template <> struct is_standard_layout<T> : public eastl::integral_constant<bool, isStandardLayout> { }; \
+ template <> struct is_standard_layout<const T> : public eastl::integral_constant<bool, isStandardLayout> { }; \
+ template <> struct is_standard_layout<volatile T> : public eastl::integral_constant<bool, isStandardLayout> { }; \
+ template <> struct is_standard_layout<const volatile T> : public eastl::integral_constant<bool, isStandardLayout> { }; \
+ }
+
+ // Old style macro, for backwards compatibility:
+ #define EASTL_DECLARE_STANDARD_LAYOUT(T) namespace eastl{ template <> struct is_standard_layout<T> : public true_type{}; template <> struct is_standard_layout<const T> : public true_type{}; }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_constructor
+ //
+ // has_trivial_constructor<T>::value == true if and only if T is a class
+ // or struct that has a trivial constructor. A constructor is trivial if
+ // - it is implicitly defined by the compiler, and
+ // - is_polymorphic<T>::value == false, and
+ // - T has no virtual base classes, and
+ // - for every direct base class of T, has_trivial_constructor<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or array
+ // of class type, has_trivial_constructor<M>::value == true,
+ // where M is the type of the data member
+ //
+ // has_trivial_constructor may only be applied to complete types.
+ //
+ // Without help from the compiler or user, has_trivial_constructor will not
+ // report that a class or struct has a trivial constructor.
+ // The user can use EASTL_DECLARE_TRIVIAL_CONSTRUCTOR to help the compiler.
+ //
+ // A default constructor for a class X is a constructor of class X that
+ // can be called without an argument.
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1600) // VS2010+
+ #define EASTL_TYPE_TRAIT_has_trivial_constructor_CONFORMANCE 1 // has_trivial_constructor is conforming.
+
+ template <typename T>
+ struct has_trivial_constructor : public eastl::integral_constant<bool, (__has_trivial_constructor(T) || eastl::is_pod<T>::value) && !eastl::is_hat_type<T>::value>{};
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ #define EASTL_TYPE_TRAIT_has_trivial_constructor_CONFORMANCE 1 // has_trivial_constructor is conforming.
+
+ template <typename T>
+ struct has_trivial_constructor : public eastl::integral_constant<bool, __has_trivial_constructor(T) || eastl::is_pod<T>::value>{};
+ #else
+ #define EASTL_TYPE_TRAIT_has_trivial_constructor_CONFORMANCE 0 // has_trivial_constructor is not fully conforming. Can return false negatives.
+
+ // With current compilers, this is all we can do.
+ template <typename T>
+ struct has_trivial_constructor : public eastl::is_pod<T> {};
+ #endif
+
+ #define EASTL_DECLARE_HAS_TRIVIAL_CONSTRUCTOR(T, hasTrivialConstructor) \
+ namespace eastl { \
+ template <> struct has_trivial_constructor<T> : public eastl::integral_constant<bool, hasTrivialConstructor> { }; \
+ }
+
+ // Old style macro, for backwards compatibility:
+ #define EASTL_DECLARE_TRIVIAL_CONSTRUCTOR(T) namespace eastl{ template <> struct has_trivial_constructor<T> : public true_type{}; template <> struct has_trivial_constructor<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_copy
+ //
+ // has_trivial_copy<T>::value == true if and only if T is a class or
+ // struct that has a trivial copy constructor. A copy constructor is
+ // trivial if
+ // - it is implicitly defined by the compiler, and
+ // - is_polymorphic<T>::value == false, and
+ // - T has no virtual base classes, and
+ // - for every direct base class of T, has_trivial_copy<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or array
+ // of class type, has_trivial_copy<M>::value == true, where M is the
+ // type of the data member
+ //
+ // has_trivial_copy may only be applied to complete types.
+ //
+ // Another way of looking at this is:
+ // A copy constructor for class X is trivial if it is implicitly
+ // declared and if all the following are true:
+ // - Class X has no virtual functions (10.3) and no virtual base classes (10.1).
+ // - Each direct base class of X has a trivial copy constructor.
+ // - For all the nonstatic data members of X that are of class type
+ // (or array thereof), each such class type has a trivial copy constructor;
+ // otherwise the copy constructor is nontrivial.
+ //
+ // Without help from the compiler or user, has_trivial_copy will not report
+ // that a class or struct has a trivial copy constructor. The user can
+ // use EASTL_DECLARE_TRIVIAL_COPY to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(_MSC_VER)
+ #define EASTL_TYPE_TRAIT_has_trivial_copy_CONFORMANCE 1 // has_trivial_copy is conforming.
+
+ template <typename T>
+ struct has_trivial_copy : public eastl::integral_constant<bool, (__has_trivial_copy(T) || eastl::is_pod<T>::value) && !eastl::is_volatile<T>::value && !eastl::is_hat_type<T>::value>{};
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ #define EASTL_TYPE_TRAIT_has_trivial_copy_CONFORMANCE 1 // has_trivial_copy is conforming.
+
+ template <typename T>
+ struct has_trivial_copy : public eastl::integral_constant<bool, (__has_trivial_copy(T) || eastl::is_pod<T>::value) && (!eastl::is_volatile<T>::value && !eastl::is_reference<T>::value)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_has_trivial_copy_CONFORMANCE 0 // has_trivial_copy is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_trivial_copy : public eastl::integral_constant<bool, eastl::is_pod<T>::value && !eastl::is_volatile<T>::value>{};
+ #endif
+
+ #define EASTL_DECLARE_HAS_TRIVIAL_COPY(T, hasTrivialCopy) \
+ namespace eastl { \
+ template <> struct has_trivial_copy<T> : public eastl::integral_constant<bool, hasTrivialCopy> { }; \
+ }
+
+ // Old style macro, for backwards compatibility:
+ #define EASTL_DECLARE_TRIVIAL_COPY(T) namespace eastl{ template <> struct has_trivial_copy<T> : public true_type{}; template <> struct has_trivial_copy<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_assign
+ //
+ // has_trivial_assign<T>::value == true if and only if T is a class or
+ // struct that has a trivial copy assignment operator. A copy assignment
+ // operator is trivial if:
+ // - it is implicitly defined by the compiler, and
+ // - is_polymorphic<T>::value == false, and
+ // - T has no virtual base classes, and
+ // - for every direct base class of T, has_trivial_assign<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or array
+ // of class type, has_trivial_assign<M>::value == true, where M is
+ // the type of the data member.
+ //
+ // has_trivial_assign may only be applied to complete types.
+ //
+ // Without help from the compiler or user, has_trivial_assign will not
+ // report that a class or struct has trivial assignment. The user
+ // can use EASTL_DECLARE_TRIVIAL_ASSIGN to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1600)
+ #define EASTL_TYPE_TRAIT_has_trivial_assign_CONFORMANCE 1 // has_trivial_assign is conforming.
+
+ template <typename T>
+ struct has_trivial_assign : public integral_constant<bool, (__has_trivial_assign(T) || eastl::is_pod<T>::value) && !eastl::is_const<T>::value && !eastl::is_volatile<T>::value && !eastl::is_hat_type<T>::value>{};
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ #define EASTL_TYPE_TRAIT_has_trivial_assign_CONFORMANCE 1 // has_trivial_assign is conforming.
+
+ template <typename T>
+ struct has_trivial_assign : public integral_constant<bool, (__has_trivial_assign(T) || eastl::is_pod<T>::value) && !eastl::is_const<T>::value && !eastl::is_volatile<T>::value>{};
+ #else
+ #define EASTL_TYPE_TRAIT_has_trivial_assign_CONFORMANCE 0 // has_trivial_assign is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_trivial_assign : public integral_constant<bool,
+ is_pod<T>::value && !is_const<T>::value && !is_volatile<T>::value
+ >{};
+ #endif
+
+ #define EASTL_DECLARE_HAS_TRIVIAL_ASSIGN(T, hasTrivialAssign) \
+ namespace eastl { \
+ template <> struct has_trivial_assign<T> : public eastl::integral_constant<bool, hasTrivialAssign> { }; \
+ }
+
+ // Old style macro, for backwards compatibility:
+ #define EASTL_DECLARE_TRIVIAL_ASSIGN(T) namespace eastl{ template <> struct has_trivial_assign<T> : public true_type{}; template <> struct has_trivial_assign<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_destructor
+ //
+ // has_trivial_destructor<T>::value == true if and only if T is a class
+ // or struct that has a trivial destructor. A destructor is trivial if
+ // - it is implicitly defined by the compiler, and
+ // - for every direct base class of T, has_trivial_destructor<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or
+ // array of class type, has_trivial_destructor<M>::value == true,
+ // where M is the type of the data member
+ //
+ // has_trivial_destructor may only be applied to complete types.
+ //
+ // Without help from the compiler or user, has_trivial_destructor will not
+ // report that a class or struct has a trivial destructor.
+ // The user can use EASTL_DECLARE_TRIVIAL_DESTRUCTOR to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1600)
+ #define EASTL_TYPE_TRAIT_has_trivial_destructor_CONFORMANCE 1 // has_trivial_destructor is conforming.
+
+ template <typename T>
+ struct has_trivial_destructor : public eastl::integral_constant<bool, (__has_trivial_destructor(T) || eastl::is_pod<T>::value) && !eastl::is_hat_type<T>::value>{};
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ #define EASTL_TYPE_TRAIT_has_trivial_destructor_CONFORMANCE 1 // has_trivial_destructor is conforming.
+
+ template <typename T>
+ struct has_trivial_destructor : public eastl::integral_constant<bool, __has_trivial_destructor(T) || eastl::is_pod<T>::value>{};
+ #else
+ #define EASTL_TYPE_TRAIT_has_trivial_destructor_CONFORMANCE 0 // has_trivial_destructor is not fully conforming. Can return false negatives.
+
+ // With current compilers, this is all we can do.
+ template <typename T>
+ struct has_trivial_destructor : public eastl::is_pod<T>{};
+ #endif
+
+ #define EASTL_DECLARE_HAS_TRIVIAL_DESTRUCTOR(T, hasTrivialDestructor) \
+ namespace eastl { \
+ template <> struct has_trivial_destructor<T> : public eastl::integral_constant<bool, hasTrivialDestructor> { }; \
+ }
+
+ // Old style macro, for backwards compatibility:
+ #define EASTL_DECLARE_TRIVIAL_DESTRUCTOR(T) namespace eastl{ template <> struct has_trivial_destructor<T> : public true_type{}; template <> struct has_trivial_destructor<const T> : public true_type{}; }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool has_trivial_destructor_v = has_trivial_destructor<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_relocate
+ //
+ // This is an EA extension to the type traits standard.
+ // This trait is deprecated under conforming C++11 compilers, as C++11
+ // move functionality supersedes this functionality and we want to
+ // migrate away from it in the future.
+ //
+ // A trivially relocatable object is one that can be safely memmove'd
+ // to uninitialized memory. Construction, assignment, and destruction
+ // properties are not addressed by this trait. A type that has the
+ // is_fundamental trait would always have the has_trivial_relocate trait.
+ // A type that has the has_trivial_constructor, has_trivial_copy or
+ // has_trivial_assign traits would usually have the has_trivial_relocate
+ // trait, but this is not strictly guaranteed.
+ //
+ // The user can use EASTL_DECLARE_TRIVIAL_RELOCATE to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_has_trivial_relocate_CONFORMANCE 0 // has_trivial_relocate is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_trivial_relocate : public eastl::bool_constant<eastl::is_pod_v<T> && !eastl::is_volatile_v<T>> {};
+
+ #define EASTL_DECLARE_TRIVIAL_RELOCATE(T) namespace eastl{ template <> struct has_trivial_relocate<T> : public true_type{}; template <> struct has_trivial_relocate<const T> : public true_type{}; }
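+
+ // Example usage of the declaration macro (an illustrative sketch; 'RefString' is a
+ // hypothetical type whose object representation remains valid after being memmove'd
+ // to a new address, even though its constructor and destructor are not trivial).
+ // The macro must be used at global scope:
+ //     class RefString { char* mpData; size_t mnLength; /* non-trivial ctor/dtor */ };
+ //     EASTL_DECLARE_TRIVIAL_RELOCATE(RefString)
+ //     static_assert(eastl::has_trivial_relocate<RefString>::value, "relocation may be done with memmove");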
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_nothrow_constructor
+ //
+ // has_nothrow_constructor<T>::value == true if and only if T is a
+ // class or struct whose default constructor has an empty throw specification.
+ //
+ // has_nothrow_constructor may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ #define EASTL_TYPE_TRAIT_has_nothrow_constructor_CONFORMANCE 1
+
+ template <typename T>
+ struct has_nothrow_constructor
+ : public eastl::integral_constant<bool, __has_nothrow_constructor(T)>{};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && defined(_MSC_VER)
+ // Microsoft's implementation of __has_nothrow_constructor is crippled and returns true only if T is a class that has an explicit constructor.
+ // "Returns true if the default constructor has an empty exception specification."
+ #define EASTL_TYPE_TRAIT_has_nothrow_constructor_CONFORMANCE 0
+
+ template <typename T> // This is mistakenly returning true for an unbounded array of scalar type.
+ struct has_nothrow_constructor : public eastl::integral_constant<bool, __has_nothrow_constructor(T) || eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value || eastl::is_reference<T>::value>{};
+
+ #else
+ #define EASTL_TYPE_TRAIT_has_nothrow_constructor_CONFORMANCE 0 // has_nothrow_constructor is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_nothrow_constructor // To do: Improve this to include other types that can work.
+ { static const bool value = eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value || eastl::is_reference<T>::value; };
+ #endif
+
+ #define EASTL_DECLARE_HAS_NOTHROW_CONSTRUCTOR(T, hasNothrowConstructor) \
+ namespace eastl { \
+ template <> struct has_nothrow_constructor<T> : public eastl::integral_constant<bool, hasNothrowConstructor> { }; \
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_nothrow_copy
+ //
+ // has_nothrow_copy<T>::value == true if and only if T is a class or
+ // struct whose copy constructor has an empty throw specification.
+ //
+ // has_nothrow_copy may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ #define EASTL_TYPE_TRAIT_has_nothrow_copy_CONFORMANCE 1
+
+ template <typename T>
+ struct has_nothrow_copy : public eastl::integral_constant<bool, __has_nothrow_copy(T)>{};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && defined(_MSC_VER)
+ // Microsoft's implementation of __has_nothrow_copy is crippled and returns true only if T is a class that has a copy constructor.
+ // "Returns true if the copy constructor has an empty exception specification."
+ #define EASTL_TYPE_TRAIT_has_nothrow_copy_CONFORMANCE 0
+
+ template <typename T>
+ struct has_nothrow_copy : public eastl::integral_constant<bool, __has_nothrow_copy(T) || eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value || eastl::is_reference<T>::value>{};
+
+ #else
+ #define EASTL_TYPE_TRAIT_has_nothrow_copy_CONFORMANCE 0 // has_nothrow_copy is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_nothrow_copy // To do: Improve this to include other types that can work.
+ { static const bool value = eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value || eastl::is_reference<T>::value; };
+ #endif
+
+ #define EASTL_DECLARE_HAS_NOTHROW_COPY(T, hasNothrowCopy) \
+ namespace eastl { \
+ template <> struct has_nothrow_copy<T> : public eastl::integral_constant<bool, hasNothrowCopy> { }; \
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_nothrow_assign
+ //
+ // has_nothrow_assign<T>::value == true if and only if T is a class or
+ // struct whose copy assignment operator has an empty throw specification.
+ //
+ // has_nothrow_assign may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ #define EASTL_TYPE_TRAIT_has_nothrow_assign_CONFORMANCE 1
+
+ template <typename T>
+ struct has_nothrow_assign : public eastl::integral_constant<bool, __has_nothrow_assign(T)>{};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && defined(_MSC_VER)
+ // Microsoft's implementation of __has_nothrow_assign is crippled and returns true only if T is a class that has an assignment operator.
+ // "Returns true if a copy assignment operator has an empty exception specification."
+ #define EASTL_TYPE_TRAIT_has_nothrow_assign_CONFORMANCE 0
+
+ template <typename T> // This is mistakenly returning true for an unbounded array of scalar type.
+ struct has_nothrow_assign : public eastl::integral_constant<bool, __has_nothrow_assign(T) || eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value || eastl::is_reference<T>::value>{};
+ #else
+ #define EASTL_TYPE_TRAIT_has_nothrow_assign_CONFORMANCE 0 // has_nothrow_assign is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_nothrow_assign // To do: Improve this to include other types that can work.
+ { static const bool value = eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value || eastl::is_reference<T>::value; } ;
+ #endif
+
+ #define EASTL_DECLARE_HAS_NOTHROW_ASSIGN(T, hasNothrowAssign) \
+ namespace eastl { \
+ template <> struct has_nothrow_assign<T> : public eastl::integral_constant<bool, hasNothrowAssign> { }; \
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_virtual_destructor
+ //
+ // has_virtual_destructor<T>::value == true if and only if T is a class
+ // or struct with a virtual destructor.
+ //
+ // has_virtual_destructor may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ #define EASTL_TYPE_TRAIT_has_virtual_destructor_CONFORMANCE 1
+
+ template <typename T>
+ struct has_virtual_destructor : public eastl::integral_constant<bool, __has_virtual_destructor(T)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_has_virtual_destructor_CONFORMANCE 0 // has_virtual_destructor is not fully conforming. Can return false negatives.
+
+ template <typename T>
+ struct has_virtual_destructor : public eastl::false_type{};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool has_virtual_destructor_v = has_virtual_destructor<T>::value;
+ #endif
+
+ #define EASTL_DECLARE_HAS_VIRTUAL_DESTRUCTOR(T, hasVirtualDestructor) \
+ namespace eastl { \
+ template <> struct has_virtual_destructor<T> : public eastl::integral_constant<bool, hasVirtualDestructor> { }; \
+ template <> struct has_virtual_destructor<const T> : public eastl::integral_constant<bool, hasVirtualDestructor> { }; \
+ template <> struct has_virtual_destructor<volatile T> : public eastl::integral_constant<bool, hasVirtualDestructor> { }; \
+ template <> struct has_virtual_destructor<const volatile T> : public eastl::integral_constant<bool, hasVirtualDestructor> { }; \
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_literal_type
+ //
+ // See the C++11 Standard, section 2.9,p10.
+ // A type is a literal type if it is:
+ // - a scalar type; or
+ // - a reference type referring to a literal type; or
+ // - an array of literal type; or
+ // - a class type (Clause 9) that has all of the following properties:
+ // - it has a trivial destructor,
+ //        - every constructor call and full-expression in the brace-or-equal-initializers for non-static data members (if any) is a constant expression (5.19),
+ // - it is an aggregate type (8.5.1) or has at least one constexpr constructor or constructor template that is not a copy or move constructor, and
+ // - all of its non-static data members and base classes are of literal types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_literal))
+ #define EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE 1
+
+ template <typename T>
+ struct is_literal_type : public eastl::integral_constant<bool, __is_literal(T)>{};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006)) || (defined(_MSC_VER) && (_MSC_VER >= 1700))) // VS2012+
+ #if defined(EA_COMPILER_GNUC) && (!defined(EA_COMPILER_CPP11_ENABLED) || (EA_COMPILER_VERSION < 4007))
+ #define EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE 0 // It seems that in this case GCC supports the compiler intrinsic but reports it as false when it's true.
+ #else
+ #define EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE 1
+ #endif
+
+ template <typename T>
+ struct is_literal_type : public eastl::integral_constant<bool, __is_literal_type(T)>{};
+
+ #else
+ #define EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE 0
+
+ // It's not clear if this trait can be fully implemented without explicit compiler support.
+ // For now we assume that it can't be but implement something that gets it right at least
+ // some of the time. Recall that partial positives and false negatives are OK (though not ideal),
+ // while false positives are not OK for us to generate.
+
+ template <typename T> // This is not a complete implementation and will be true for only some literal types (the basic ones).
+ struct is_literal_type : public eastl::integral_constant<bool, eastl::is_scalar<typename eastl::remove_reference<typename eastl::remove_all_extents<T>::type>::type>::value>{};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_literal_type_v = is_literal_type<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_abstract
+ //
+ // is_abstract<T>::value == true if and only if T is a class or struct
+ // that has at least one pure virtual function. is_abstract may only
+ // be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_abstract)))
+ #define EASTL_TYPE_TRAIT_is_abstract_CONFORMANCE 1 // is_abstract is conforming.
+
+ template <typename T>
+ struct is_abstract : public integral_constant<bool, __is_abstract(T)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_abstract_CONFORMANCE 0
+
+ template<typename T, bool = !eastl::is_object<T>::value>
+ class is_abstract_helper
+ {
+ template<typename>
+ static eastl::yes_type test(...);
+
+ template<typename T1>
+ static eastl::no_type test(T1(*)[1]); // The following: 'typedef SomeAbstractClass (*SomeFunctionType)[1];' is invalid (can't have an array of abstract types) and thus doesn't choose this path.
+
+ public:
+ static const bool value = (sizeof(test<T>(NULL)) == sizeof(eastl::yes_type));
+ };
+
+ template <typename T>
+ struct is_abstract_helper<T, true>
+ { static const bool value = false; };
+
+ template <typename T>
+ struct is_abstract
+ : public integral_constant<bool, is_abstract_helper<T>::value> { };
+
+ #endif
+
+ #define EASTL_DECLARE_IS_ABSTRACT(T, isAbstract) \
+ namespace eastl { \
+ template <> struct is_abstract<T> : public eastl::integral_constant<bool, isAbstract> { }; \
+ template <> struct is_abstract<const T> : public eastl::integral_constant<bool, isAbstract> { }; \
+ template <> struct is_abstract<volatile T> : public eastl::integral_constant<bool, isAbstract> { }; \
+ template <> struct is_abstract<const volatile T> : public eastl::integral_constant<bool, isAbstract> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_abstract_v = is_abstract<T>::value;
+ #endif
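+
+ // Example usage (an illustrative sketch; 'Shape' and 'Circle' are hypothetical types):
+ //     struct Shape  { virtual void Draw() = 0; };        // a pure virtual function makes the class abstract
+ //     struct Circle : public Shape { void Draw() {} };   // overrides the pure virtual function
+ //
+ //     static_assert( eastl::is_abstract<Shape>::value,  "cannot be instantiated directly");
+ //     static_assert(!eastl::is_abstract<Circle>::value, "all pure virtual functions are overridden");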
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_copyable
+ //
+ // is_trivially_copyable<T>::value == true if and only if T is a trivially
+ // copyable type (3.9). T shall be a complete type, (possibly cv-qualified)
+ // void, or an array of unknown bound.
+ //
+ // 3.9,p3: For any trivially copyable type T, if two pointers to T
+ // point to distinct T objects obj1 and obj2, where neither obj1 nor
+ // obj2 is a base-class subobject, if the underlying bytes making
+ // up obj1 are copied into obj2, obj2 shall subsequently hold the
+ // same value as obj1. In other words, you can memcpy/memmove it.
+ ///////////////////////////////////////////////////////////////////////
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(_MSC_VER) && (_MSC_VER >= 1700)) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 5003)) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_trivially_copyable)))
+ #define EASTL_TYPE_TRAIT_is_trivially_copyable_CONFORMANCE 1
+
+ // https://connect.microsoft.com/VisualStudio/feedback/details/808827/c-std-is-trivially-copyable-produces-wrong-result-for-arrays
+ //
+ // From Microsoft:
+ // We're working on fixing this. When overhauling <type_traits> in VC 2013, I incorrectly believed that is_trivially_copyable was a synonym
+ // for is_trivially_copy_constructible. I've asked the compiler team to provide a compiler hook with 100% accurate answers. (Currently, the
+ // compiler hook has incorrect answers for volatile scalars, volatile data members, and various scenarios for defaulted/deleted/private
+ // special member functions - I wrote an exhaustive test case to exercise the complicated Standardese.) When the compiler hook is fixed,
+ // I'll change <type_traits> to invoke it.
+ //
+ // Microsoft broken VS2013 STL implementation:
+ // template<class _Ty>
+ // struct is_trivially_copyable
+ // : is_trivially_copy_constructible<_Ty>::type
+ // { // determine whether _Ty has a trivial copy constructor
+ // };
+ //
+
+ template <typename T>
+ struct is_trivially_copyable { static const bool value = __is_trivially_copyable(T); };
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_GNUC))
+ #define EASTL_TYPE_TRAIT_is_trivially_copyable_CONFORMANCE 1
+
+ // Microsoft (prior to VS2012) and GCC have __has_trivial_copy, but it may not be identical with the goals of this type trait.
+ template <typename T>
+ struct is_trivially_copyable : public integral_constant<bool, (__has_trivial_copy(T) || eastl::is_pod<typename eastl::remove_all_extents<T>::type>::value) && (!eastl::is_void<T>::value && !eastl::is_volatile<T>::value && !eastl::is_reference<T>::value)>{};
+ #else
+ #define EASTL_TYPE_TRAIT_is_trivially_copyable_CONFORMANCE 0 // Generates false negatives.
+
+ template <typename T>
+ struct is_trivially_copyable { static const bool value = eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value; };
+ #endif
+
+ #define EASTL_DECLARE_IS_TRIVIALLY_COPYABLE(T, isTriviallyCopyable) \
+ namespace eastl { \
+ template <> struct is_trivially_copyable<T> : public eastl::integral_constant<bool, isTriviallyCopyable> { }; \
+ template <> struct is_trivially_copyable<const T> : public eastl::integral_constant<bool, isTriviallyCopyable> { }; \
+ template <> struct is_trivially_copyable<volatile T> : public eastl::integral_constant<bool, isTriviallyCopyable> { }; \
+ template <> struct is_trivially_copyable<const volatile T> : public eastl::integral_constant<bool, isTriviallyCopyable> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_copyable_v = is_trivially_copyable<T>::value;
+ #endif
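+
+ // Example usage (an illustrative sketch; 'Pod' and 'Managed' are hypothetical types):
+ //     struct Pod     { int x; float y; };
+ //     struct Managed { Managed(const Managed&); };   // user-provided copy constructor
+ //
+ //     static_assert( eastl::is_trivially_copyable<Pod>::value,     "object bytes may be copied with memcpy/memmove");
+ //     static_assert(!eastl::is_trivially_copyable<Managed>::value, "copies must go through the copy constructor");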
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_constructible
+ //
+ // See the C++11 Standard, section 20.9.4.3,p6.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE 1
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_constructible)))
+ template<typename T, typename... Args>
+ struct is_constructible : public bool_constant<__is_constructible(T, Args...) > {};
+ #else
+ // We implement a copy of move here as move_internal. We are currently stuck doing this because our move
+ // implementation is in <utility.h> and <utility.h> currently #includes us, and so we have a header
+ // chicken-and-egg problem. To do: Resolve this, probably by putting eastl::move somewhere else.
+ template <typename T>
+ inline typename eastl::remove_reference<T>::type&& move_internal(T&& x) EA_NOEXCEPT
+ { return ((typename eastl::remove_reference<T>::type&&)x); }
+
+ template <typename T, class ...Args>
+ typename first_type_select<eastl::true_type, decltype(eastl::move_internal(T(eastl::declval<Args>()...)))>::type is(T&&, Args&& ...);
+
+ template <typename T>
+ struct can_construct_scalar_helper
+ {
+ static eastl::true_type can(T);
+ static eastl::false_type can(...);
+ };
+
+ template <typename ...Args>
+ eastl::false_type is(argument_sink, Args&& ...);
+
+ // Except for scalars and references (handled below), check for constructibility via decltype.
+ template <bool, typename T, typename... Args>
+ struct is_constructible_helper_2 // argument_sink will catch all T that is not constructible from the Args and denote false_type
+ : public eastl::identity<decltype(is(eastl::declval<T>(), eastl::declval<Args>()...))>::type {};
+
+ template <typename T>
+ struct is_constructible_helper_2<true, T>
+ : public eastl::is_scalar<T> {};
+
+ template <typename T, typename Arg0> // We handle the case of multiple arguments below (by disallowing them).
+ struct is_constructible_helper_2<true, T, Arg0>
+ : public eastl::identity<decltype(can_construct_scalar_helper<T>::can(eastl::declval<Arg0>()))>::type {};
+
+ // Scalars and references can be constructed only with 0 or 1 argument. e.g the following is an invalid expression: int(17, 23)
+ template <typename T, typename Arg0, typename ...Args>
+ struct is_constructible_helper_2<true, T, Arg0, Args...>
+ : public eastl::false_type {};
+
+ template <bool, typename T, typename... Args>
+ struct is_constructible_helper_1
+ : public is_constructible_helper_2<eastl::is_scalar<T>::value || eastl::is_reference<T>::value, T, Args...> {};
+
+ // Unilaterally dismiss void, abstract, unknown bound arrays, and function types as not constructible.
+ template <typename T, typename... Args>
+ struct is_constructible_helper_1<true, T, Args...>
+ : public false_type {};
+
+ // is_constructible
+ template <typename T, typename... Args>
+ struct is_constructible
+ : public is_constructible_helper_1<(eastl::is_abstract<typename eastl::remove_all_extents<T>::type>::value ||
+ eastl::is_array_of_unknown_bounds<T>::value ||
+ eastl::is_function<typename eastl::remove_all_extents<T>::type>::value ||
+ eastl::has_void_arg<T, Args...>::value),
+ T, Args...> {};
+
+ // Array types are constructible if constructed with no arguments and if their element type is default-constructible
+ template <typename Array, size_t N>
+ struct is_constructible_helper_2<false, Array[N]>
+ : public eastl::is_constructible<typename eastl::remove_all_extents<Array>::type> {};
+
+ // Arrays with arguments are not constructible. e.g. the following is an invalid expression: int[3](37, 34, 12)
+ template <typename Array, size_t N, typename ...Args>
+ struct is_constructible_helper_2<false, Array[N], Args...>
+ : public eastl::false_type {};
+
+ #endif
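+
+	// Illustrative expected results (not exhaustive; assumes the conforming code path above):
+	//    is_constructible<int>::value                   == true
+	//    is_constructible<int, float>::value            == true
+	//    is_constructible<int&, int>::value             == false   (an lvalue reference cannot bind to an rvalue)
+	//    is_constructible<void>::value                  == false
+	//    is_constructible<int[3]>::value                == true
+	//    is_constructible<int[3], int, int, int>::value == false   (arrays cannot be constructed with arguments)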
+
+
+ // You need to manually declare const/volatile variants individually if you want them.
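+	// Example usage (illustrative; MyClass is a hypothetical user-defined type):
+	//    EASTL_DECLARE_IS_CONSTRUCTIBLE(MyClass, int, true)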
+ #define EASTL_DECLARE_IS_CONSTRUCTIBLE(T, U, isConstructible) \
+ namespace eastl { \
+ template <> struct is_constructible<T, U> : public eastl::integral_constant<bool, isConstructible> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T, class... Args>
+ EA_CONSTEXPR bool is_constructible_v = is_constructible<T, Args...>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_constructible
+ //
+ // is_constructible<T, Args...>::value is true and the variable definition
+ // for is_constructible, as defined below, is known to call no operation
+ // that is not trivial (3.9, 12). T and all types in the parameter pack
+ // Args shall be complete types, (possibly cv-qualified) void, or arrays
+ // of unknown bound.
+ //
+ // Note:
+ // C++11's is_trivially_constructible sounds the same as the pre-standard
+ // has_trivial_constructor type trait (which we also support here). However,
+ // the definition of has_trivial_constructor has never been formally standardized
+ // and so we can't just blindly equate the two to each other. Since we are
+ // moving forward with C++11 and deprecating the old type traits, we leave
+ // the old ones as-is, though we defer to them in cases where we don't seem
+ // to have a good alternative.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(EA_COMPILER_NO_VARIADIC_TEMPLATES)
+
+ #define EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE 0
+
+ // In this version we allow only zero or one argument (Arg). We can add more arguments
+ // by creating a number of extra specializations. It's probably not possible to
+ // simplify the implementation with recursive templates because ctor argument
+ // presence is specific.
+ //
+ // To consider: we can fold the two implementations below by making a macro that's defined
+		// as __is_trivially_constructible(T) or eastl::has_trivial_copy<T>::value, depending on
+ // whether the __is_trivially_constructible compiler intrinsic is available.
+
+ // If the compiler has this trait built-in (which ideally all compilers would have since it's necessary for full conformance) use it.
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_trivially_constructible))
+
+ template <typename T, typename Arg0 = eastl::unused>
+ struct is_trivially_constructible
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, eastl::unused>
+ : public eastl::integral_constant<bool, __is_trivially_constructible(T)> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, T>
+ : public eastl::integral_constant<bool, __is_trivially_constructible(T)> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, T&>
+ : public eastl::integral_constant<bool, __is_trivially_constructible(T)> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, const T&>
+ : public eastl::integral_constant<bool, __is_trivially_constructible(T)> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, volatile T&>
+ : public eastl::integral_constant<bool, __is_trivially_constructible(T)> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, const volatile T&>
+ : public eastl::integral_constant<bool, __is_trivially_constructible(T)> {};
+
+ #else
+
+ template <typename T, typename Arg0 = eastl::unused>
+ struct is_trivially_constructible
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, eastl::unused>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_constructor<typename eastl::remove_all_extents<T>::type>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, T>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, const T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, volatile T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, const volatile T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ #endif
+
+ #else
+
+ // If the compiler has this trait built-in (which ideally all compilers would have since it's necessary for full conformance) use it.
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_trivially_constructible))
+ #define EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE 1
+
+ // We have a problem with clang here as of clang 3.4: __is_trivially_constructible(int[]) is false, yet I believe it should be true.
+ // Until it gets resolved, what we do is check for is_constructible along with __is_trivially_constructible().
+ template <typename T, typename... Args>
+ struct is_trivially_constructible
+ : public eastl::integral_constant<bool, eastl::is_constructible<T, Args...>::value && __is_trivially_constructible(T, Args...)> {};
+
+ #else
+
+ #define EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE 0 // This is 0 but in fact it will work for most real-world cases due to the has_trivial_constructor specialization below.
+
+ template <typename T, typename... Args>
+ struct is_trivially_constructible
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_trivially_constructible<T>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_constructor<typename eastl::remove_all_extents<T>::type>::value> {};
+
+ // It's questionable whether we can use has_trivial_copy here, as it could theoretically create a false-positive.
+ template <typename T>
+ struct is_trivially_constructible<T, T>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, T&&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, const T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, volatile T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_constructible<T, const volatile T&>
+ : public eastl::integral_constant<bool, eastl::is_constructible<T>::value && eastl::has_trivial_copy<T>::value> {};
+
+ #endif
+
+ #endif
+
+
+ #define EASTL_DECLARE_IS_TRIVIALLY_CONSTRUCTIBLE(T, isTriviallyConstructible) \
+ namespace eastl { \
+ template <> struct is_trivially_constructible<T> : public eastl::integral_constant<bool, isTriviallyConstructible> { }; \
+ template <> struct is_trivially_constructible<const T> : public eastl::integral_constant<bool, isTriviallyConstructible> { }; \
+ template <> struct is_trivially_constructible<volatile T> : public eastl::integral_constant<bool, isTriviallyConstructible> { }; \
+ template <> struct is_trivially_constructible<const volatile T> : public eastl::integral_constant<bool, isTriviallyConstructible> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+		template <class T, class... Args>
+		EA_CONSTEXPR bool is_trivially_constructible_v = is_trivially_constructible<T, Args...>::value;
+ #endif
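+
+	// Illustrative expected results (NonTrivial is a hypothetical type; assumes the conforming code path):
+	//    struct NonTrivial { NonTrivial(){} };                     // user-provided default constructor
+	//    is_trivially_constructible<int>::value        == true
+	//    is_trivially_constructible<int, int>::value   == true
+	//    is_trivially_constructible<NonTrivial>::value == false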
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_default_constructible
+ //
+ // is_trivially_constructible<T>::value is true.
+ // This is thus identical to is_trivially_constructible.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_trivially_default_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_trivially_default_constructible
+ : public eastl::is_trivially_constructible<T> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_default_constructible_v = is_trivially_default_constructible<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivial
+ //
+ // is_trivial<T>::value == true if T is a scalar type, a trivially copyable
+	// class with a trivial default constructor, or an array of such a type/class
+	// (possibly cv-qualified).
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_trivial_CONFORMANCE ((EASTL_TYPE_TRAIT_is_trivially_default_constructible_CONFORMANCE && EASTL_TYPE_TRAIT_is_trivially_copyable_CONFORMANCE) ? 1 : 0)
+
+ #if defined(_MSC_VER) && _MSC_VER == 1800
+ template<bool, typename T>
+ struct is_trivial_helper
+ : public eastl::integral_constant<bool, eastl::is_trivially_copyable<T>::value && eastl::is_trivially_default_constructible<T>::value>{};
+
+ template<typename T>
+ struct is_trivial_helper<true, T>
+ : public false_type{};
+
+ template <typename T>
+ struct is_trivial
+ : public is_trivial_helper<(EA_ALIGN_OF(T) > EA_PLATFORM_MIN_MALLOC_ALIGNMENT), T>::type{};
+ #else
+		// All other compilers seem to be able to handle aligned types passed by value
+ template <typename T>
+ struct is_trivial
+ : public eastl::integral_constant<bool, eastl::is_trivially_copyable<T>::value && eastl::is_trivially_default_constructible<T>::value> {};
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivial_v = is_trivial<T>::value;
+ #endif
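+
+	// Illustrative expected results (Trivial/NonTrivial are hypothetical types):
+	//    struct Trivial    { int x; };
+	//    struct NonTrivial { NonTrivial() : x(0) {} int x; };      // user-provided default constructor
+	//    is_trivial<int>::value        == true
+	//    is_trivial<Trivial>::value    == true
+	//    is_trivial<NonTrivial>::value == false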
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_constructible
+ //
+ // is_constructible<T, Args...>::value is true and the variable definition
+ // for is_constructible, as defined below, is known not to throw any
+ // exceptions (5.3.7). T and all types in the parameter pack Args shall
+ // be complete types, (possibly cv-qualified) void, or arrays of unknown bound.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #if defined(EA_COMPILER_NO_NOEXCEPT)
+
+ #define EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE 0
+
+ template <typename T, typename... Args>
+ struct is_nothrow_constructible
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_nothrow_constructible<T>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_constructor<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_constructible<T, T>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_copy<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_constructible<T, const T&>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_copy<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_constructible<T, T&>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_copy<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_constructible<T, T&&>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_copy<T>::value> {};
+
+ #else
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION < 4008)
+ #define EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE 0 // GCC up to v4.7's noexcept is broken and fails to generate true for the case of compiler-generated constructors.
+ #else
+ #define EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE
+ #endif
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // *_noexcept_wrapper implements a workaround for VS2015 preview. A standards conforming noexcept operator allows variadic template expansion.
+ // There appears to be an issue with VS2015 preview that prevents variadic template expansion into a noexcept operator that is passed directly
+ // to a template parameter.
+ //
+ // The fix hoists the noexcept expression into a separate struct and caches the result of the expression. This result is then passed to integral_constant.
+ //
+ // Example code from Clang libc++
+ // template <class _Tp, class... _Args>
+		//     struct __libcpp_is_nothrow_constructible</*is constructible*/true, /*is reference*/false, _Tp, _Args...>
+ // : public integral_constant<bool, noexcept(_Tp(declval<_Args>()...))> { };
+ //
+
+ template <typename T, typename... Args>
+ struct is_nothrow_constructible_helper_noexcept_wrapper
+ { static const bool value = noexcept(T(eastl::declval<Args>()...)); };
+
+ template <bool, typename T, typename... Args>
+ struct is_nothrow_constructible_helper;
+
+ template <typename T, typename... Args>
+ struct is_nothrow_constructible_helper<true, T, Args...>
+ : public eastl::integral_constant<bool, is_nothrow_constructible_helper_noexcept_wrapper<T, Args...>::value> {};
+
+ template<typename T, typename Arg>
+ struct is_nothrow_constructible_helper<true, T, Arg>
+ : public eastl::integral_constant<bool, noexcept(T(eastl::declval<Arg>()))> {};
+
+ template<typename T>
+ struct is_nothrow_constructible_helper<true, T>
+ : public eastl::integral_constant<bool, noexcept(T())> {};
+
+ template <typename T, typename... Args>
+ struct is_nothrow_constructible_helper<false, T, Args...>
+ : public eastl::false_type {};
+
+ template <typename T, typename... Args>
+ struct is_nothrow_constructible
+ : public eastl::is_nothrow_constructible_helper<eastl::is_constructible<T, Args...>::value, T, Args...> {};
+
+ template <typename T, size_t N>
+ struct is_nothrow_constructible<T[N]>
+ : public eastl::is_nothrow_constructible_helper<eastl::is_constructible<T>::value, T> {};
+ #endif
+
+ #define EASTL_DECLARE_IS_NOTHROW_CONSTRUCTIBLE(T, isNothrowConstructible) \
+ namespace eastl{ \
+ template <> struct is_nothrow_constructible<T> : public eastl::integral_constant<bool, isNothrowConstructible> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T, typename... Args>
+ EA_CONSTEXPR bool is_nothrow_constructible_v = is_nothrow_constructible<T, Args...>::value;
+ #endif
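+
+	// Illustrative expected results (Throwing is a hypothetical type; assumes the conforming noexcept-based path):
+	//    struct Throwing { Throwing(); };                          // potentially-throwing default constructor
+	//    is_nothrow_constructible<int>::value        == true
+	//    is_nothrow_constructible<int, float>::value == true
+	//    is_nothrow_constructible<Throwing>::value   == false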
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_default_constructible
+ //
+ // is_constructible<T>::value is true.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_default_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_default_constructible
+ : public eastl::is_constructible<T> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_default_constructible_v = is_default_constructible<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_default_constructible
+ ///////////////////////////////////////////////////////////////////////
+ // TODO(rparolin): implement type-trait
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_copy_constructible
+ //
+ // is_constructible<T, const T&>::value is true.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_copy_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_copy_constructible
+ : public eastl::is_constructible<T, typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_copy_constructible_v = is_copy_constructible<T>::value;
+ #endif
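+
+	// Illustrative expected results (NonCopyable is a hypothetical type; assumes the conforming code path):
+	//    struct NonCopyable { NonCopyable(const NonCopyable&) = delete; };
+	//    is_copy_constructible<int>::value         == true
+	//    is_copy_constructible<NonCopyable>::value == false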
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_copy_constructible
+ //
+ // is_trivially_constructible<T, const T&>::value is true.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_trivially_copy_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_trivially_copy_constructible
+ : public eastl::is_trivially_constructible<T, typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_copy_constructible_v = is_trivially_copy_constructible<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_copy_constructible
+ //
+	// is_nothrow_constructible<T, const T&>::value is true.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_nothrow_copy_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_nothrow_copy_constructible
+ : public is_nothrow_constructible<T, typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_nothrow_copy_constructible_v = is_nothrow_copy_constructible<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_move_constructible
+ //
+ // is_constructible<T, T&&>::value is true.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_move_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_move_constructible
+ : public eastl::is_constructible<T, typename eastl::add_rvalue_reference<T>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_move_constructible_v = is_move_constructible<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_move_constructible
+ //
+ // is_trivially_constructible<T, T&&>::value is true.
+ // T shall be a complete type, (possibly cv-qualified) void, or an
+ // array of unknown bound.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_trivially_move_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_trivially_move_constructible
+ : public eastl::is_trivially_constructible<T, typename eastl::add_rvalue_reference<T>::type> {};
+
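+	// The macro below lets the user manually override the trait for a specific type,
+	// which is mainly useful on the non-conforming code path. Example usage
+	// (illustrative; MyClass is a hypothetical user-defined type):
+	//    EASTL_DECLARE_IS_TRIVIALLY_MOVE_CONSTRUCTIBLE(MyClass, true)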
+	#define EASTL_DECLARE_IS_TRIVIALLY_MOVE_CONSTRUCTIBLE(T, isTriviallyMoveConstructible) \
+ namespace eastl{ \
+ template <> struct is_trivially_move_constructible<T> : public eastl::integral_constant<bool, isTriviallyMoveConstructible> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_move_constructible_v = is_trivially_move_constructible<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_assignable
+ //
+ // The expression declval<T>() = declval<U>() is well-formed when treated as an unevaluated operand.
+ // Access checking is performed as if in a context unrelated to T and U. Only the validity of
+ // the immediate context of the assignment expression is considered. The compilation of the expression
+ // can result in side effects such as the instantiation of class template specializations and function
+ // template specializations, the generation of implicitly-defined functions, and so on. Such side
+ // effects are not in the "immediate context" and can result in the program being ill-formed.
+ //
+ // Note:
+ // This type trait has a misleading and counter-intuitive name. It does not indicate whether an instance
+ // of U can be assigned to an instance of T (e.g. t = u). Instead it indicates whether the assignment can be
+ // done after adding rvalue references to both, as in add_rvalue_reference<T>::type = add_rvalue_reference<U>::type.
+ // A counterintuitive result of this is that is_assignable<int, int>::value == false. The is_copy_assignable
+ // trait indicates if a type can be assigned to its own type, though there isn't a standard C++ way to tell
+ // if an arbitrary type is assignable to another type.
+ // http://stackoverflow.com/questions/19920213/why-is-stdis-assignable-counter-intuitive
+ //
+ // Note:
+	// A true is_assignable value doesn't guarantee that the expression is compilable; the compiler checks
+	// only that the assignment matches before compilation. In particular, if you have a templated operator=
+ // for a class, the compiler will always say is_assignable is true, regardless of what's being tested
+ // on the right hand side of the expression. It may actually turn out during compilation that the
+ // templated operator= fails to compile because in practice it doesn't accept every possible type for
+ // the right hand side of the expression.
+ //
+ // Expected results:
+ // is_assignable<void, void>::value == false
+ // is_assignable<int&, int>::value == true
+ // is_assignable<int, int>::value == false
+ // is_assignable<int, int&>::value == false
+ // is_assignable<bool, bool>::value == false
+ // is_assignable<int, float>::value == false
+ // is_assignable<int[], int[]>::value == false
+ // is_assignable<char*, int*>::value == false
+ // is_assignable<char*, const char*>::value == false
+ // is_assignable<const char*, char*>::value == false
+ // is_assignable<PodA, PodB*>::value == false
+ // is_assignable<Assignable, Assignable>::value == true
+ // is_assignable<Assignable, Unrelated>::value == false
+ //
+ // Note:
+ // Our implementation here yields different results than does the std::is_assignable from Dinkumware-based Standard
+ // Libraries, but yields similar results to the std::is_assignable from GCC's libstdc++ and clang's libc++. It may
+ // possibly be that the Dinkumware results are intentionally different for some practical purpose or because they
+	// represent the spirit of the Standard but not the letter of the Standard.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #define EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE 1
+
+ template<typename T, typename U>
+ struct is_assignable_helper
+ {
+ template<typename, typename>
+ static eastl::no_type is(...);
+
+ template<typename T1, typename U1>
+ static decltype(eastl::declval<T1>() = eastl::declval<U1>(), eastl::yes_type()) is(int);
+
+ static const bool value = (sizeof(is<T, U>(0)) == sizeof(eastl::yes_type));
+ };
+
+ template<typename T, typename U>
+ struct is_assignable :
+ public eastl::integral_constant<bool, eastl::is_assignable_helper<T, U>::value> {};
+
+	// The main purpose of this macro is to help the non-conforming case above.
+ // Note: We don't handle const/volatile variations here, as we expect the user to
+ // manually specify any such variations via this macro.
+ // Example usage:
+ // EASTL_DECLARE_IS_ASSIGNABLE(int, int, false)
+ //
+ #define EASTL_DECLARE_IS_ASSIGNABLE(T, U, isAssignable) \
+ namespace eastl { \
+ template <> struct is_assignable<T, U> : public eastl::integral_constant<bool, isAssignable> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T, class U>
+ EA_CONSTEXPR bool is_assignable_v = is_assignable<T, U>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_lvalue_assignable
+ //
+ // This is an EASTL extension function which is like is_assignable but
+ // works for arbitrary assignments and not just rvalue assignments.
+ // This function provides an intuitive assignability test, as opposed
+ // to is_assignable.
+ //
+ // Note: is_lvalue_assignable<T, T> === is_copy_assignable<T>
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_lvalue_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE
+
+ template <typename T, typename U>
+ struct is_lvalue_assignable
+ : public eastl::is_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_lvalue_reference<typename eastl::add_const<U>::type>::type> {};
+
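+	// Illustrative expected results (compare with the is_assignable results above):
+	//    is_assignable<int, int>::value                  == false
+	//    is_lvalue_assignable<int, int>::value           == true
+	//    is_lvalue_assignable<int, float>::value         == true
+	//    is_lvalue_assignable<const char*, char*>::value == true
+	//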
+ #define EASTL_DECLARE_IS_LVALUE_ASSIGNABLE(T, U, isLvalueAssignable) \
+ namespace eastl { \
+ template <> struct is_lvalue_assignable<T, U> : public eastl::integral_constant<bool, isLvalueAssignable> { }; \
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_assignable
+ //
+ // is_assignable<T, U>::value is true and the assignment, as defined by
+ // is_assignable, is known to call no operation that is not trivial (3.9, 12).
+ // T and U shall be complete types, (possibly cv-qualified) void, or
+ // arrays of unknown bound
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_trivially_assignable))
+ #define EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE 1
+
+ template <typename T, typename U>
+ struct is_trivially_assignable
+ : eastl::integral_constant<bool, __is_trivially_assignable(T, U)> {};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) && (_MSC_VER >= 1800))
+ #define EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE
+
+ // This code path is attempting to work around the issue with VS2013 __is_trivially_assignable compiler intrinsic documented in the link
+ // below. todo: Re-evaluate in VS2014.
+ //
+ // https://connect.microsoft.com/VisualStudio/feedback/details/806233/std-is-trivially-copyable-const-int-n-and-std-is-trivially-copyable-int-n-incorrect
+
+ template <bool A, typename T, typename U>
+ struct is_trivially_assignable_helper;
+
+ template <typename T, typename U>
+ struct is_trivially_assignable_helper<true, T, U> : eastl::integral_constant<bool, __is_trivially_assignable(T, U)>{};
+
+ template <typename T, typename U>
+ struct is_trivially_assignable_helper<false, T, U> : false_type{};
+
+ template <typename T, typename U>
+ struct is_trivially_assignable
+ : eastl::integral_constant<bool, is_trivially_assignable_helper< eastl::is_assignable<T, U>::value, T, U >::value> {};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_GNUC))
+ #define EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE
+
+		// Microsoft (up to at least VS2012) and GCC have __has_trivial_assign, but it may not be identical to the goals of this type trait.
+		// The Microsoft type trait headers suggest that a future version of VS will have a __is_trivially_assignable intrinsic, but we
+		// need to come up with something in the meantime. To do: Re-evaluate this for VS2013+ when it becomes available.
+ template <typename T, typename U>
+ struct is_trivially_assignable
+ : eastl::integral_constant<bool, eastl::is_assignable<T, U>::value &&
+ (eastl::is_pod<typename eastl::remove_reference<T>::type>::value || __has_trivial_assign(typename eastl::remove_reference<T>::type))> {};
+ #else
+
+ #define EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE 0 // Generates false negatives.
+
+ template <typename T, typename U>
+ struct is_trivially_assignable
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_trivially_assignable<T&, T>
+ : public eastl::integral_constant<bool, eastl::is_scalar<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_assignable<T&, T&>
+ : public eastl::integral_constant<bool, eastl::is_scalar<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_assignable<T&, const T&>
+ : public eastl::integral_constant<bool, eastl::is_scalar<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_assignable<T&, T&&>
+ : public eastl::integral_constant<bool, eastl::is_scalar<T>::value> {};
+
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T, class U>
+ EA_CONSTEXPR bool is_trivially_assignable_v = is_trivially_assignable<T, U>::value;
+ #endif
+
+	// The main purpose of this macro is to help the non-conforming case above.
+ // Note: We don't handle const/volatile variations here, as we expect the user to
+ // manually specify any such variations via this macro.
+ // Example usage:
+ // EASTL_DECLARE_IS_TRIVIALLY_ASSIGNABLE(int, int, false)
+ //
+ #define EASTL_DECLARE_IS_TRIVIALLY_ASSIGNABLE(T, U, isTriviallyAssignable) \
+ namespace eastl { \
+ template <> struct is_trivially_assignable<T, U> : public eastl::integral_constant<bool, isTriviallyAssignable> { }; \
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_assignable
+ //
+ // is_assignable<T, U>::value is true and the assignment is known
+ // not to throw any exceptions (5.3.7). T and U shall be complete
+ // types, (possibly cv-qualified) void, or arrays of unknown bound.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(_MSC_VER) && (_MSC_VER >= 1800) // VS2013+
+ #define EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE 1
+
+ template <typename T, typename U>
+ struct is_nothrow_assignable
+ : eastl::integral_constant<bool, __is_nothrow_assignable(T, U)> {};
+
+ #elif defined(EA_COMPILER_NO_NOEXCEPT) || defined(__EDG_VERSION__) // EDG mis-compiles the conforming code below and so must be placed here.
+ #define EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE 0
+
+ template <typename T, typename U>
+ struct is_nothrow_assignable
+ : public false_type {};
+
+ // Note that the following are crippled in that they support only assignment of T types to other T types.
+ template <typename T>
+ struct is_nothrow_assignable<T&, T>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_assign<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_assignable<T&, T&>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_assign<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_assignable<T&, const T&>
+ : public eastl::integral_constant<bool, eastl::has_nothrow_assign<T>::value> {};
+
+ #else
+ #define EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE 1
+
+ template <bool, typename T, typename U>
+ struct is_nothrow_assignable_helper;
+
+ template <typename T, typename U>
+ struct is_nothrow_assignable_helper<false, T, U>
+ : public false_type {};
+
+ template <typename T, typename U>
+ struct is_nothrow_assignable_helper<true, T, U> // Set to true if the assignment (same as is_assignable) cannot generate an exception.
+ : public eastl::integral_constant<bool, noexcept(eastl::declval<T>() = eastl::declval<U>()) >
+ {
+ };
+
+ template <typename T, typename U>
+ struct is_nothrow_assignable
+ : public eastl::is_nothrow_assignable_helper<eastl::is_assignable<T, U>::value, T, U>
+ {
+ };
+ #endif
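+
+	// Illustrative expected results (assumes the conforming noexcept-based path above):
+	//    is_nothrow_assignable<int&, int>::value == true
+	//    is_nothrow_assignable<int, int>::value  == false   (assignment to an rvalue; see is_assignable)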
+
+ #define EASTL_DECLARE_IS_NOTHROW_ASSIGNABLE(T, isNothrowAssignable) \
+ namespace eastl{ \
+ template <> struct is_nothrow_assignable<T> : public eastl::integral_constant<bool, isNothrowAssignable> { }; \
+ template <> struct is_nothrow_assignable<const T> : public eastl::integral_constant<bool, isNothrowAssignable> { }; \
+ template <> struct is_nothrow_assignable<volatile T> : public eastl::integral_constant<bool, isNothrowAssignable> { }; \
+ template <> struct is_nothrow_assignable<const volatile T> : public eastl::integral_constant<bool, isNothrowAssignable> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T, class U>
+ EA_CONSTEXPR bool is_nothrow_assignable_v = is_nothrow_assignable<T, U>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_copy_assignable
+ //
+ // is_assignable<T&, const T&>::value is true. T shall be a complete type,
+	// (possibly cv-qualified) void, or an array of unknown bound.
+ //
+ // This (and not is_assignable) is the type trait you use to tell if you
+ // can do an arbitrary assignment. is_assignable tells if you can do an
+ // assignment specifically to an rvalue and not in general.
+ // http://stackoverflow.com/a/19921030/725009
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_copy_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE
+
+ template <typename T>
+ struct is_copy_assignable
+ : public eastl::is_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_copy_assignable_v = is_copy_assignable<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_copy_assignable
+ //
+ // is_trivially_assignable<T&, const T&>::value is true. T shall be a
+ // complete type, (possibly cv-qualified) void, or an array of unknown bound.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_trivially_copy_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE
+
+	#if EASTL_TYPE_TRAIT_is_trivially_copy_assignable_CONFORMANCE
+ template <typename T>
+ struct is_trivially_copy_assignable
+ : public eastl::is_trivially_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type> {};
+	#else
+ template <typename T>
+ struct is_trivially_copy_assignable
+ : public integral_constant<bool,
+ eastl::is_scalar<T>::value || eastl::is_pod<T>::value || eastl::is_trivially_assignable<typename eastl::add_lvalue_reference<T>::type, typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type>::value
+ > {};
+	#endif
+
+ #define EASTL_DECLARE_IS_TRIVIALLY_COPY_ASSIGNABLE(T, isTriviallyCopyAssignable) \
+ namespace eastl { \
+ template <> struct is_trivially_copy_assignable<T> : public eastl::integral_constant<bool, isTriviallyCopyAssignable> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_copy_assignable_v = is_trivially_copy_assignable<T>::value;
+ #endif
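+
+	// Illustrative expected results (HasAssign is a hypothetical type; assumes the conforming code path):
+	//    struct HasAssign { HasAssign& operator=(const HasAssign&); };   // user-provided copy assignment
+	//    is_trivially_copy_assignable<int>::value       == true
+	//    is_trivially_copy_assignable<HasAssign>::value == false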
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_copy_assignable
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_nothrow_copy_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE
+
+ template <typename T>
+ struct is_nothrow_copy_assignable
+ : public eastl::is_nothrow_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_lvalue_reference<typename eastl::add_const<T>::type>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_nothrow_copy_assignable_v = is_nothrow_copy_assignable<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_move_assignable
+ //
+ // is_assignable<T&, T&&>::value is true. T shall be a complete type,
+	// (possibly cv-qualified) void, or an array of unknown bound.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_move_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE
+
+ template <typename T>
+ struct is_move_assignable
+ : public eastl::is_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_rvalue_reference<T>::type> {};
+
+ #define EASTL_DECLARE_IS_MOVE_ASSIGNABLE(T, isMoveAssignable) \
+ namespace eastl{ \
+ template <> struct is_move_assignable<T> : public eastl::integral_constant<bool, isMoveAssignable> { }; \
+ template <> struct is_move_assignable<const T> : public eastl::integral_constant<bool, isMoveAssignable> { }; \
+ template <> struct is_move_assignable<volatile T> : public eastl::integral_constant<bool, isMoveAssignable> { }; \
+ template <> struct is_move_assignable<const volatile T> : public eastl::integral_constant<bool, isMoveAssignable> { }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_move_assignable_v = is_move_assignable<T>::value;
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_move_assignable
+ //
+	// is_trivially_assignable<T&, T&&>::value is true. T shall be a complete type,
+ // (possibly cv-qualified) void, or an array of unknown bound.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_trivially_move_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE
+
+ template <typename T>
+ struct is_trivially_move_assignable
+ : public eastl::is_trivially_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_rvalue_reference<T>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_move_assignable_v = is_trivially_move_assignable<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_move_assignable
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_nothrow_move_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE
+
+ template <typename T>
+ struct is_nothrow_move_assignable
+ : public eastl::is_nothrow_assignable<typename eastl::add_lvalue_reference<T>::type,
+ typename eastl::add_rvalue_reference<T>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_nothrow_move_assignable_v = is_nothrow_move_assignable<T>::value;
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_destructible
+ //
+ // For a complete type T and given
+ // template <class U>
+ // struct test { U u; };
+ // test<T>::~test() is not deleted (C++11 "= delete").
+ // T shall be a complete type, (possibly cv-qualified) void, or an array of unknown bound.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if 0 // defined(_MSC_VER) && (_MSC_VER >= 1800) // VS2013+ -- Disabled due to __is_destructible being broken in VC++ versions up to at least VS2013. A ticket will be submitted for this
+ #define EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE 1
+
+ template <typename T>
+ struct is_destructible
+ : integral_constant<bool, __is_destructible(T)> {};
+
+ #elif defined(EA_COMPILER_NO_DECLTYPE) || defined(EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS) || defined(_MSC_VER) || defined(__EDG_VERSION__) // VS2012 and EDG mis-compile the conforming code below and so must be placed here.
+ #define EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE 0
+
+ // This implementation works for almost all cases, with the primary exception being the
+ // case that the user declared the destructor as deleted. To deal with that case the
+ // user needs to use EASTL_DECLARE_IS_NOT_DESTRUCTIBLE to cause is_destructible<T>::value
+ // to be false.
+
+ template <typename T>
+ struct is_destructible
+ : public eastl::integral_constant<bool, !eastl::is_array_of_unknown_bounds<T>::value &&
+ !eastl::is_void<T>::value &&
+ !eastl::is_function<T>::value &&
+ !eastl::is_abstract<T>::value> {};
+ #else
+ #define EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE 1
+
+ template <typename U>
+ struct destructible_test_helper{ U u; };
+
+ template <typename>
+ eastl::false_type destructible_test_function(...);
+
+ template <typename T, typename U = decltype(eastl::declval<eastl::destructible_test_helper<T> >().~destructible_test_helper<T>())>
+ eastl::true_type destructible_test_function(int);
+
+ template <typename T, bool = eastl::is_array_of_unknown_bounds<T>::value || // Exclude these types from being considered destructible.
+ eastl::is_void<T>::value ||
+ eastl::is_function<T>::value ||
+ eastl::is_abstract<T>::value>
+ struct is_destructible_helper
+ : public eastl::identity<decltype(eastl::destructible_test_function<T>(0))>::type {}; // Need to wrap decltype with identity because some compilers otherwise don't like the bare decltype usage.
+
+ template <typename T>
+ struct is_destructible_helper<T, true>
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_destructible
+ : public is_destructible_helper<T> {};
+
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_destructible_v = is_destructible<T>::value;
+ #endif
+
+ #define EASTL_DECLARE_IS_DESTRUCTIBLE(T, isDestructible) \
+ namespace eastl{ \
+ template <> struct is_destructible<T> : public eastl::integral_constant<bool, isDestructible>{}; \
+ template <> struct is_destructible<const T> : public eastl::integral_constant<bool, isDestructible>{}; \
+ template <> struct is_destructible<volatile T> : public eastl::integral_constant<bool, isDestructible>{}; \
+ template <> struct is_destructible<const volatile T> : public eastl::integral_constant<bool, isDestructible>{}; \
+ }
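+
+	// Illustrative expected results (NoDtor is a hypothetical type; the deleted-destructor case
+	// is detected only on the conforming code path):
+	//    struct NoDtor { ~NoDtor() = delete; };
+	//    is_destructible<int>::value    == true
+	//    is_destructible<void>::value   == false
+	//    is_destructible<int[]>::value  == false   (array of unknown bound)
+	//    is_destructible<NoDtor>::value == false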
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_trivially_destructible
+ //
+ // is_destructible<T>::value is true and the indicated destructor is
+ // known to be trivial. T shall be a complete type, (possibly cv-qualified)
+ // void, or an array of unknown bound.
+ //
+ // A destructor is trivial if it is not user-provided and if:
+ // - the destructor is not virtual,
+ // - all of the direct base classes of its class have trivial destructors, and
+ // - for all of the non-static data members of its class that are of
+ // class type (or array thereof), each such class has a trivial destructor.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if 0 // defined(_MSC_VER) && (_MSC_VER >= 1800) // VS2013+ -- Disabled due to __is_trivially_destructible being broken in VC++ versions up to at least VS2013. A ticket will be submitted for this
+ #define EASTL_TYPE_TRAIT_is_trivially_destructible_CONFORMANCE 1
+
+ template <typename T>
+ struct is_trivially_destructible
+ : integral_constant<bool, __is_trivially_destructible(T)> {};
+
+ #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG))
+ #define EASTL_TYPE_TRAIT_is_trivially_destructible_CONFORMANCE EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE
+
+ template <typename T>
+ struct is_trivially_destructible // Can't use just __has_trivial_destructor(T) because some compilers give it slightly different meaning, and are just plain broken, such as VC++'s __has_trivial_destructor, which says false for fundamental types.
+ : public integral_constant<bool, eastl::is_destructible<T>::value && ((__has_trivial_destructor(T) && !eastl::is_hat_type<T>::value)|| eastl::is_scalar<typename eastl::remove_all_extents<T>::type>::value)> {};
+
+ #else
+ #define EASTL_TYPE_TRAIT_is_trivially_destructible_CONFORMANCE 0
+
+ template <typename T>
+ struct is_trivially_destructible_helper
+ : public integral_constant<bool, (eastl::is_pod<T>::value || eastl::is_scalar<T>::value || eastl::is_reference<T>::value) && !eastl::is_void<T>::value> {};
+
+ template <typename T>
+ struct is_trivially_destructible
+ : public eastl::is_trivially_destructible_helper<typename eastl::remove_all_extents<T>::type> {};
+ #endif
+
+ #define EASTL_DECLARE_IS_TRIVIALLY_DESTRUCTIBLE(T, isTriviallyDestructible) \
+ namespace eastl{ \
+ template <> struct is_trivially_destructible<T> : public eastl::integral_constant<bool, isTriviallyDestructible>{}; \
+ template <> struct is_trivially_destructible<const T> : public eastl::integral_constant<bool, isTriviallyDestructible>{}; \
+ template <> struct is_trivially_destructible<volatile T> : public eastl::integral_constant<bool, isTriviallyDestructible>{}; \
+ template <> struct is_trivially_destructible<const volatile T> : public eastl::integral_constant<bool, isTriviallyDestructible>{}; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_trivially_destructible_v = is_trivially_destructible<T>::value;
+ #endif
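+
+	// Illustrative expected results (VirtualDtor is a hypothetical type; assumes the conforming code path):
+	//    struct VirtualDtor { virtual ~VirtualDtor() {} };
+	//    is_trivially_destructible<int>::value         == true
+	//    is_trivially_destructible<VirtualDtor>::value == false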
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_destructible
+ //
+ // is_destructible<T>::value is true and the indicated destructor is
+ // known not to throw any exceptions (5.3.7). T shall be a complete type,
+ // (possibly cv-qualified) void, or an array of unknown bound.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if 0 // defined(_MSC_VER) && (_MSC_VER >= 1800) // VS2013+ -- Disabled due to __is_nothrow_destructible being broken in VC++ versions up to at least VS2013. A ticket will be submitted for this
+		#define EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE ((_MSC_VER >= 1900) ? 1 : 0) // VS2013 (1800) doesn't support noexcept and so can't support all usage of this properly (in particular the default exception specifications defined in [C++11 Standard, 15.4 paragraph 14]).
+
+ template <typename T>
+ struct is_nothrow_destructible
+ : integral_constant<bool, __is_nothrow_destructible(T)> {};
+
+ #elif defined(EA_COMPILER_NO_NOEXCEPT)
+ #define EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE 0
+
+ template <typename T>
+ struct is_nothrow_destructible_helper
+ : public eastl::integral_constant<bool, eastl::is_scalar<T>::value || eastl::is_reference<T>::value> {};
+
+ template <typename T>
+ struct is_nothrow_destructible
+ : public eastl::is_nothrow_destructible_helper<typename eastl::remove_all_extents<T>::type> {};
+
+ #else
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION < 4008)
+ #define EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE 0 // GCC up to v4.7's noexcept is broken and fails to generate true for the case of compiler-generated destructors.
+ #else
+ #define EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE
+ #endif
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // *_noexcept_wrapper implements a workaround for VS2015. A standards conforming noexcept operator allows variadic template expansion.
+ // There appears to be an issue with VS2015 that prevents variadic template expansion into a noexcept operator that is passed directly
+ // to a template parameter.
+ //
+ // The fix hoists the noexcept expression into a separate struct and caches the result of the expression. This result is then passed to integral_constant.
+ //
+ // Example code from Clang libc++
+ // template <class _Tp, class... _Args>
+		//     struct __libcpp_is_nothrow_constructible</*is constructible*/true, /*is reference*/false, _Tp, _Args...>
+ // : public integral_constant<bool, noexcept(_Tp(declval<_Args>()...))> { };
+ //
+
+ template <typename T>
+ struct is_nothrow_destructible_helper_noexcept_wrapper
+ { static const bool value = noexcept(eastl::declval<T&>().~T()); };
+
+ template <typename T, bool>
+ struct is_nothrow_destructible_helper;
+
+ template <typename T>
+ struct is_nothrow_destructible_helper<T, false>
+ : public eastl::false_type {};
+
+ template <typename T>
+ struct is_nothrow_destructible_helper<T, true> // If the expression T::~T is a noexcept expression then it's nothrow.
+ : public eastl::integral_constant<bool, is_nothrow_destructible_helper_noexcept_wrapper<T>::value > {};
+
+ template <typename T>
+ struct is_nothrow_destructible // A type needs to at least be destructible before it could be nothrow destructible.
+ : public eastl::is_nothrow_destructible_helper<T, eastl::is_destructible<T>::value> {};
+
+ template <typename T, size_t N> // An array is nothrow destructible if its element type is nothrow destructible.
+ struct is_nothrow_destructible<T[N]> // To consider: Replace this with a remove_all_extents pathway.
+ : public eastl::is_nothrow_destructible<T> {};
+
+ template <typename T>
+ struct is_nothrow_destructible<T&> // A reference type cannot throw while being destructed. It's just a reference.
+ : public eastl::true_type {};
+
+ template <typename T>
+ struct is_nothrow_destructible<T&&> // An rvalue reference type cannot throw while being destructed.
+ : public eastl::true_type {};
+
+ #endif
+
+ #define EASTL_DECLARE_IS_NOTHROW_DESTRUCTIBLE(T, isNoThrowDestructible) \
+ namespace eastl{ \
+ template <> struct is_nothrow_destructible<T> { static const bool value = isNoThrowDestructible; }; \
+ template <> struct is_nothrow_destructible<const T> { static const bool value = isNoThrowDestructible; }; \
+ template <> struct is_nothrow_destructible<volatile T> { static const bool value = isNoThrowDestructible; }; \
+ template <> struct is_nothrow_destructible<const volatile T> { static const bool value = isNoThrowDestructible; }; \
+ }
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_nothrow_destructible_v = is_nothrow_destructible<T>::value;
+ #endif
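+
+	// Illustrative expected results (ThrowingDtor is a hypothetical type; assumes the conforming noexcept-based path):
+	//    struct ThrowingDtor { ~ThrowingDtor() noexcept(false) {} };
+	//    is_nothrow_destructible<int>::value          == true
+	//    is_nothrow_destructible<int&>::value         == true
+	//    is_nothrow_destructible<ThrowingDtor>::value == false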
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_default_constructible
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #define EASTL_TYPE_TRAIT_is_nothrow_default_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_nothrow_default_constructible
+ : public eastl::is_nothrow_constructible<T> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_nothrow_default_constructible_v = is_nothrow_default_constructible<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_nothrow_move_constructible
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #define EASTL_TYPE_TRAIT_is_nothrow_move_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE
+
+ template <typename T>
+ struct is_nothrow_move_constructible
+ : public eastl::is_nothrow_constructible<T, typename eastl::add_rvalue_reference<T>::type> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_nothrow_move_constructible_v = is_nothrow_move_constructible<T>::value;
+ #endif
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
diff --git a/include/EASTL/internal/type_properties.h b/include/EASTL/internal/type_properties.h
new file mode 100644
index 0000000..5276f87
--- /dev/null
+++ b/include/EASTL/internal/type_properties.h
@@ -0,0 +1,380 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_PROPERTIES_H
+#define EASTL_INTERNAL_TYPE_PROPERTIES_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <limits.h>
+#include <EASTL/internal/type_compound.h>
+
+
+namespace eastl
+{
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // underlying_type
+ //
+ // Defines a member typedef type of type that is the underlying type for the enumeration T.
+ // Requires explicit compiler support to implement.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(_MSC_VER) && (_MSC_VER >= 1700)) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007)) || defined(EA_COMPILER_CLANG)) // VS2012+
+ #define EASTL_TYPE_TRAIT_underlying_type_CONFORMANCE 1 // underlying_type is conforming.
+
+ template <typename T>
+ struct underlying_type{ typedef __underlying_type(T) type; };
+
+ #else
+ #define EASTL_TYPE_TRAIT_underlying_type_CONFORMANCE 0
+
+ template <typename T>
+ struct underlying_type{ typedef int type; }; // This is of course wrong, but we emulate libstdc++ and typedef it as int.
+ #endif
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename T>
+ using underlying_type_t = typename underlying_type<T>::type;
+ #endif
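+
+	// Example usage (illustrative; Color is a hypothetical enumeration; relies on the
+	// compiler-intrinsic path above, as the fallback incorrectly typedefs the type as int):
+	//    enum class Color : unsigned char { Red, Green };
+	//    typedef underlying_type<Color>::type ColorStorage;    // unsigned char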
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_unique_object_representations
+ //
+ // If T is TriviallyCopyable and if any two objects of type T with the same
+ // value have the same object representation, value is true. For any other
+ // type, value is false.
+ //
+ // http://en.cppreference.com/w/cpp/types/has_unique_object_representations
+ ///////////////////////////////////////////////////////////////////////
+ #if EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE
+ #define EASTL_TYPE_TRAIT_has_unique_object_representations_CONFORMANCE 1
+
+ template <typename T>
+ struct has_unique_object_representations
+ : public integral_constant<bool, __has_unique_object_representations(remove_cv_t<remove_all_extents_t<T>>)>
+ {
+ };
+
+ #else
+ #define EASTL_TYPE_TRAIT_has_unique_object_representations_CONFORMANCE 0
+
+ template <typename T>
+ struct has_unique_object_representations
+ : public integral_constant<bool, is_integral_v<remove_cv_t<remove_all_extents_t<T>>>> // only integral types (floating point types excluded).
+ {
+ };
+
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR auto has_unique_object_representations_v = has_unique_object_representations<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_signed
+ //
+ // is_signed<T>::value == true if and only if T is one of the following types:
+ // [const] [volatile] char (maybe)
+ // [const] [volatile] signed char
+ // [const] [volatile] short
+ // [const] [volatile] int
+ // [const] [volatile] long
+ // [const] [volatile] long long
+ // [const] [volatile] float
+ // [const] [volatile] double
+ // [const] [volatile] long double
+ //
+	// Used to determine if an integral type is signed or unsigned.
+ // Given that there are some user-made classes which emulate integral
+ // types, we provide the EASTL_DECLARE_SIGNED macro to allow you to
+ // set a given class to be identified as a signed type.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_signed_CONFORMANCE 1 // is_signed is conforming.
+
+ template <typename T> struct is_signed_helper : public false_type{};
+
+ template <> struct is_signed_helper<signed char> : public true_type{};
+ template <> struct is_signed_helper<signed short> : public true_type{};
+ template <> struct is_signed_helper<signed int> : public true_type{};
+ template <> struct is_signed_helper<signed long> : public true_type{};
+ template <> struct is_signed_helper<signed long long> : public true_type{};
+ template <> struct is_signed_helper<float> : public true_type{};
+ template <> struct is_signed_helper<double> : public true_type{};
+ template <> struct is_signed_helper<long double> : public true_type{};
+
+ #if (CHAR_MAX == SCHAR_MAX)
+ template <> struct is_signed_helper<char> : public true_type{};
+ #endif
+ #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
+ #if defined(__WCHAR_MAX__) && ((__WCHAR_MAX__ == 2147483647) || (__WCHAR_MAX__ == 32767)) // GCC defines __WCHAR_MAX__ for most platforms.
+ template <> struct is_signed_helper<wchar_t> : public true_type{};
+ #endif
+ #endif
+
+ template <typename T>
+ struct is_signed : public eastl::is_signed_helper<typename eastl::remove_cv<T>::type>{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_signed_v = is_signed<T>::value;
+ #endif
+
+ #define EASTL_DECLARE_SIGNED(T) \
+ namespace eastl{ \
+ template <> struct is_signed<T> : public true_type{}; \
+ template <> struct is_signed<const T> : public true_type{}; \
+ template <> struct is_signed<volatile T> : public true_type{}; \
+ template <> struct is_signed<const volatile T> : public true_type{}; \
+ }
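+
+	// Example usage (illustrative; Int128Emulated is a hypothetical user-defined integral-like class):
+	//     static_assert( eastl::is_signed<int>::value,          "int is signed");
+	//     static_assert(!eastl::is_signed<unsigned int>::value, "unsigned int is not signed");
+	//
+	//     class Int128Emulated { /* ... */ };
+	//     EASTL_DECLARE_SIGNED(Int128Emulated)   // registers the user type as signed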
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_unsigned
+ //
+ // is_unsigned<T>::value == true if and only if T is one of the following types:
+ // [const] [volatile] char (maybe)
+ // [const] [volatile] unsigned char
+ // [const] [volatile] unsigned short
+ // [const] [volatile] unsigned int
+ // [const] [volatile] unsigned long
+ // [const] [volatile] unsigned long long
+ //
+	// Used to determine if an integral type is signed or unsigned.
+ // Given that there are some user-made classes which emulate integral
+ // types, we provide the EASTL_DECLARE_UNSIGNED macro to allow you to
+ // set a given class to be identified as an unsigned type.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_unsigned_CONFORMANCE 1 // is_unsigned is conforming.
+
+ template <typename T> struct is_unsigned_helper : public false_type{};
+
+ template <> struct is_unsigned_helper<unsigned char> : public true_type{};
+ template <> struct is_unsigned_helper<unsigned short> : public true_type{};
+ template <> struct is_unsigned_helper<unsigned int> : public true_type{};
+ template <> struct is_unsigned_helper<unsigned long> : public true_type{};
+ template <> struct is_unsigned_helper<unsigned long long> : public true_type{};
+
+ #if (CHAR_MAX == UCHAR_MAX)
+ template <> struct is_unsigned_helper<char> : public true_type{};
+ #endif
+ #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
+ #if defined(_MSC_VER) || (defined(__WCHAR_MAX__) && ((__WCHAR_MAX__ == 4294967295U) || (__WCHAR_MAX__ == 65535))) // GCC defines __WCHAR_MAX__ for most platforms.
+ template <> struct is_unsigned_helper<wchar_t> : public true_type{};
+ #endif
+ #endif
+
+ template <typename T>
+ struct is_unsigned : public eastl::is_unsigned_helper<typename eastl::remove_cv<T>::type>{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR bool is_unsigned_v = is_unsigned<T>::value;
+ #endif
+
+ #define EASTL_DECLARE_UNSIGNED(T) \
+ namespace eastl{ \
+ template <> struct is_unsigned<T> : public true_type{}; \
+ template <> struct is_unsigned<const T> : public true_type{}; \
+ template <> struct is_unsigned<volatile T> : public true_type{}; \
+ template <> struct is_unsigned<const volatile T> : public true_type{}; \
+ }
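+
+	// Example usage (illustrative; UInt128Emulated is a hypothetical user-defined integral-like class):
+	//     static_assert( eastl::is_unsigned<unsigned long>::value, "unsigned long is unsigned");
+	//     static_assert(!eastl::is_unsigned<int>::value,           "int is not unsigned");
+	//
+	//     class UInt128Emulated { /* ... */ };
+	//     EASTL_DECLARE_UNSIGNED(UInt128Emulated)   // registers the user type as unsigned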
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // alignment_of
+ //
+ // alignment_of<T>::value is an integral value representing, in bytes,
+ // the memory alignment of objects of type T.
+ //
+ // alignment_of may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_alignment_of_CONFORMANCE 1 // alignment_of is conforming.
+
+ template <typename T>
+ struct alignment_of_value{ static const size_t value = EASTL_ALIGN_OF(T); };
+
+ template <typename T>
+ struct alignment_of : public integral_constant<size_t, alignment_of_value<T>::value>{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR size_t alignment_of_v = alignment_of<T>::value;
+ #endif
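+
+	// Example usage (illustrative):
+	//     static_assert(eastl::alignment_of<char>::value == 1, "char is 1-byte aligned");
+	//     const size_t doubleAlignment = eastl::alignment_of<double>::value; // commonly 8, but platform-dependent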
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_aligned
+ //
+ // Defined as true if the type has alignment requirements greater
+ // than default alignment, which is taken to be 8. This allows for
+ // doing specialized object allocation and placement for such types.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_aligned_CONFORMANCE 1 // is_aligned is conforming.
+
+ template <typename T>
+ struct is_aligned_value{ static const bool value = (EASTL_ALIGN_OF(T) > 8); };
+
+ template <typename T>
+ struct is_aligned : public integral_constant<bool, is_aligned_value<T>::value>{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+		EA_CONSTEXPR bool is_aligned_v = is_aligned<T>::value;
+ #endif
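+
+	// Example usage (illustrative; Matrix4 is a hypothetical 16-byte aligned type):
+	//     struct EA_ALIGN(16) Matrix4 { float m[16]; };
+	//     static_assert( eastl::is_aligned<Matrix4>::value, "alignment requirement exceeds the default of 8");
+	//     static_assert(!eastl::is_aligned<char>::value,    "char has no extended alignment requirement");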
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rank
+ //
+ // rank<T>::value is an integral value representing the number of
+ // dimensions possessed by an array type. For example, given a
+	// multi-dimensional array type T[M][N], rank<T[M][N]>::value == 2.
+	// For a given non-array type T, rank<T>::value == 0.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_rank_CONFORMANCE 1 // rank is conforming.
+
+ template<typename T>
+ struct rank : public eastl::integral_constant<size_t, 0> {};
+
+ template<typename T>
+ struct rank<T[]> : public eastl::integral_constant<size_t, rank<T>::value + 1> {};
+
+ template<typename T, size_t N>
+ struct rank<T[N]> : public eastl::integral_constant<size_t, rank<T>::value + 1> {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR auto rank_v = rank<T>::value;
+ #endif
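+
+	// Example usage (illustrative):
+	//     static_assert(eastl::rank<int>::value       == 0, "non-array type");
+	//     static_assert(eastl::rank<int[5]>::value    == 1, "one dimension");
+	//     static_assert(eastl::rank<int[3][5]>::value == 2, "two dimensions");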
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_base_of
+ //
+ // Given two (possibly identical) types Base and Derived, is_base_of<Base, Derived>::value == true
+ // if and only if Base is a direct or indirect base class of Derived,
+ // or Base and Derived are the same type.
+ //
+ // is_base_of may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_base_of)))
+ #define EASTL_TYPE_TRAIT_is_base_of_CONFORMANCE 1 // is_base_of is conforming.
+
+ template <typename Base, typename Derived>
+ struct is_base_of : public eastl::integral_constant<bool, __is_base_of(Base, Derived) || eastl::is_same<Base, Derived>::value>{};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <typename Base, typename Derived>
+ EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_base_of_v = is_base_of<Base, Derived>::value;
+ #endif
+ #else
+ // Not implemented yet.
+ // This appears to be implementable.
+ #define EASTL_TYPE_TRAIT_is_base_of_CONFORMANCE 0
+ #endif
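+
+	// Example usage (illustrative; Animal and Dog are hypothetical types):
+	//     struct Animal {};
+	//     struct Dog : public Animal {};
+	//     static_assert( eastl::is_base_of<Animal, Dog>::value, "Animal is a base of Dog");
+	//     static_assert(!eastl::is_base_of<Dog, Animal>::value, "Dog is not a base of Animal");
+	//     static_assert( eastl::is_base_of<Dog, Dog>::value,    "a type is considered a base of itself");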
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_lvalue_reference
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_lvalue_reference_CONFORMANCE 1 // is_lvalue_reference is conforming.
+
+ template<typename T> struct is_lvalue_reference : public eastl::false_type {};
+ template<typename T> struct is_lvalue_reference<T&> : public eastl::true_type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_lvalue_reference_v = is_lvalue_reference<T>::value;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_rvalue_reference
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_is_rvalue_reference_CONFORMANCE 1 // is_rvalue_reference is conforming.
+
+ template <typename T> struct is_rvalue_reference : public eastl::false_type {};
+ template <typename T> struct is_rvalue_reference<T&&> : public eastl::true_type {};
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template<typename T>
+ EA_CONSTEXPR bool is_rvalue_reference_v = is_rvalue_reference<T>::value;
+ #endif
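+
+	// Example usage (illustrative):
+	//     static_assert( eastl::is_lvalue_reference<int&>::value,  "int& is an lvalue reference");
+	//     static_assert( eastl::is_rvalue_reference<int&&>::value, "int&& is an rvalue reference");
+	//     static_assert(!eastl::is_lvalue_reference<int>::value && !eastl::is_rvalue_reference<int>::value, "int is not a reference");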
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // result_of
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #define EASTL_TYPE_TRAIT_result_of_CONFORMANCE 1 // result_of is conforming.
+
+ template<typename> struct result_of;
+
+ template<typename F, typename... ArgTypes>
+ struct result_of<F(ArgTypes...)>
+ { typedef decltype(eastl::declval<F>()(eastl::declval<ArgTypes>()...)) type; };
+
+
+ // result_of_t is the C++14 using typedef for typename result_of<T>::type.
+ // We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers.
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #define EASTL_RESULT_OF_T(T) typename result_of<T>::type
+ #else
+ template <typename T>
+ using result_of_t = typename result_of<T>::type;
+ #define EASTL_RESULT_OF_T(T) result_of_t<T>
+ #endif
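+
+	// Example usage (illustrative; Functor is a hypothetical callable type):
+	//     struct Functor { double operator()(int, float) const; };
+	//     typedef eastl::result_of<Functor(int, float)>::type ReturnType; // ReturnType is double
+	//     static_assert(eastl::is_same<ReturnType, double>::value, "result_of yields the type of the call expression");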
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_equality
+ //
+ // Determines if the specified type can be tested for equality.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename, typename = eastl::void_t<>>
+ struct has_equality : eastl::false_type {};
+
+ template <typename T>
+ struct has_equality<T, eastl::void_t<decltype(eastl::declval<T>() == eastl::declval<T>())>> : eastl::true_type
+ {
+ };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ EA_CONSTEXPR auto has_equality_v = has_equality<T>::value;
+ #endif
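+
+	// Example usage (illustrative; Widget is a hypothetical type without an operator==):
+	//     struct Widget {};
+	//     static_assert( eastl::has_equality<int>::value,    "int can be compared with ==");
+	//     static_assert(!eastl::has_equality<Widget>::value, "Widget has no operator==");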
+
+} // namespace eastl
+
+
+#endif // Header include guard
diff --git a/include/EASTL/internal/type_transformations.h b/include/EASTL/internal/type_transformations.h
new file mode 100644
index 0000000..cffa65e
--- /dev/null
+++ b/include/EASTL/internal/type_transformations.h
@@ -0,0 +1,606 @@
+/////////////////////////////////////////////////////////////////////////////
+// Copyright (c) Electronic Arts Inc. All rights reserved.
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_TRANFORMATIONS_H
+#define EASTL_INTERNAL_TYPE_TRANFORMATIONS_H
+
+
+#include <EABase/eabase.h>
+#if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ #pragma once
+#endif
+
+#include <limits.h>
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_const
+ //
+ // Add const to a type.
+ //
+	// For a given type T, add_const<T>::type is equivalent to T
+ // const if is_const<T>::value == false, and
+ // - is_void<T>::value == true, or
+ // - is_object<T>::value == true.
+ //
+ // Otherwise, add_const<T>::type is equivalent to T.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_add_const_CONFORMANCE 1 // add_const is conforming.
+
+ template <typename T, bool = eastl::is_const<T>::value || eastl::is_reference<T>::value || eastl::is_function<T>::value>
+ struct add_const_helper
+ { typedef T type; };
+
+ template <typename T>
+ struct add_const_helper<T, false>
+ { typedef const T type; };
+
+ template <typename T>
+ struct add_const
+ { typedef typename eastl::add_const_helper<T>::type type; };
+
+	// add_const_t is the C++14 using typedef for typename add_const<T>::type.
+ // We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers.
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ #define EASTL_ADD_CONST_T(T) typename add_const<T>::type
+ #else
+ template <typename T>
+ using add_const_t = typename add_const<T>::type;
+ #define EASTL_ADD_CONST_T(T) add_const_t<T>
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_volatile
+ //
+ // Add volatile to a type.
+ //
+ // For a given type T, add_volatile<T>::type is equivalent to T volatile
+ // if is_volatile<T>::value == false, and
+ // - is_void<T>::value == true, or
+ // - is_object<T>::value == true.
+ //
+ // Otherwise, add_volatile<T>::type is equivalent to T.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_add_volatile_CONFORMANCE 1 // add_volatile is conforming.
+
+ template <typename T, bool = eastl::is_volatile<T>::value || eastl::is_reference<T>::value || eastl::is_function<T>::value>
+ struct add_volatile_helper
+ { typedef T type; };
+
+ template <typename T>
+ struct add_volatile_helper<T, false>
+ { typedef volatile T type; };
+
+ template <typename T> struct add_volatile
+ { typedef typename eastl::add_volatile_helper<T>::type type; };
+
+ template <class T> using add_volatile_t = typename add_volatile<T>::type;
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_cv
+ //
+ // The add_cv transformation trait adds const and volatile qualification
+ // to the type to which it is applied. For a given type T,
+	// add_cv<T>::type is equivalent to add_const<add_volatile<T>::type>::type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_add_cv_CONFORMANCE 1 // add_cv is conforming.
+
+ template<typename T>
+ struct add_cv
+ {
+ typedef typename add_const<typename add_volatile<T>::type>::type type;
+ };
+
+ template <class T> using add_cv_t = typename add_cv<T>::type;
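+
+	// Example usage (illustrative):
+	//     static_assert(eastl::is_same<eastl::add_const<int>::type,  const int>::value,          "const is added");
+	//     static_assert(eastl::is_same<eastl::add_cv<int>::type,     const volatile int>::value, "const and volatile are added");
+	//     static_assert(eastl::is_same<eastl::add_const<int&>::type, int&>::value,               "references are left unchanged");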
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // make_signed
+ //
+ // Used to convert an integral type to its signed equivalent, if not already.
+ // T shall be a (possibly const and/or volatile-qualified) integral type
+	// or enumeration but not a bool type.
+ //
+ // The user can define their own make_signed overrides for their own
+ // types by making a template specialization like done below and adding
+ // it to the user's code.
+ ///////////////////////////////////////////////////////////////////////
+
+ // To do: This implementation needs to be updated to support C++11 conformance (recognition of enums) and
+ // to support volatile-qualified types. It will probably be useful to have it fail for unsupported types.
+ #define EASTL_TYPE_TRAIT_make_signed_CONFORMANCE 0 // make_signed is only partially conforming.
+
+ template <typename T> struct make_signed { typedef T type; };
+
+ template <> struct make_signed<unsigned char> { typedef signed char type; };
+ template <> struct make_signed<const unsigned char> { typedef const signed char type; };
+ template <> struct make_signed<unsigned short> { typedef signed short type; };
+ template <> struct make_signed<const unsigned short> { typedef const signed short type; };
+ template <> struct make_signed<unsigned int> { typedef signed int type; };
+ template <> struct make_signed<const unsigned int> { typedef const signed int type; };
+ template <> struct make_signed<unsigned long> { typedef signed long type; };
+ template <> struct make_signed<const unsigned long> { typedef const signed long type; };
+ template <> struct make_signed<unsigned long long> { typedef signed long long type; };
+ template <> struct make_signed<const unsigned long long> { typedef const signed long long type; };
+
+ #if (defined(CHAR_MAX) && defined(UCHAR_MAX) && (CHAR_MAX == UCHAR_MAX)) // If char is unsigned, we convert char to signed char. However, if char is signed then make_signed returns char itself and not signed char.
+ template <> struct make_signed<char> { typedef signed char type; };
+ template <> struct make_signed<const char> { typedef signed char type; };
+ #endif
+
+ #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
+ #if (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 4294967295U)) // If wchar_t is a 32 bit unsigned value...
+ template<>
+ struct make_signed<wchar_t>
+ { typedef int32_t type; };
+ #elif (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 65535)) // If wchar_t is a 16 bit unsigned value...
+ template<>
+ struct make_signed<wchar_t>
+ { typedef int16_t type; };
+ #elif (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 255)) // If wchar_t is an 8 bit unsigned value...
+ template<>
+ struct make_signed<wchar_t>
+ { typedef int8_t type; };
+ #endif
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ using make_signed_t = typename make_signed<T>::type;
+ #endif
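+
+	// Example usage (illustrative):
+	//     static_assert(eastl::is_same<eastl::make_signed<unsigned int>::type, signed int>::value, "unsigned int becomes signed int");
+	//     static_assert(eastl::is_same<eastl::make_signed<signed int>::type,   signed int>::value, "already-signed types are returned unchanged");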
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_signed
+ //
+ // This is not a C++11 type trait, and is here for backwards compatibility
+	// only. Use the C++11 make_signed type trait instead.
+ ///////////////////////////////////////////////////////////////////////
+
+ template<class T>
+ struct add_signed : public make_signed<T>
+ { typedef typename eastl::make_signed<T>::type type; };
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // make_unsigned
+ //
+	// Used to convert an integral type to its unsigned equivalent, if not already.
+	// T shall be a (possibly const and/or volatile-qualified) integral type
+	// or enumeration but not a bool type.
+	//
+	// The user can define their own make_unsigned overrides for their own
+	// types by making a template specialization like done below and adding
+ // it to the user's code.
+ ///////////////////////////////////////////////////////////////////////
+
+ // To do: This implementation needs to be updated to support C++11 conformance (recognition of enums) and
+ // to support volatile-qualified types. It will probably be useful to have it fail for unsupported types.
+ #define EASTL_TYPE_TRAIT_make_unsigned_CONFORMANCE 0 // make_unsigned is only partially conforming.
+
+ template <typename T> struct make_unsigned { typedef T type; };
+
+ template <> struct make_unsigned<signed char> { typedef unsigned char type; };
+ template <> struct make_unsigned<const signed char> { typedef const unsigned char type; };
+ template <> struct make_unsigned<signed short> { typedef unsigned short type; };
+ template <> struct make_unsigned<const signed short> { typedef const unsigned short type; };
+ template <> struct make_unsigned<signed int> { typedef unsigned int type; };
+ template <> struct make_unsigned<const signed int> { typedef const unsigned int type; };
+ template <> struct make_unsigned<signed long> { typedef unsigned long type; };
+ template <> struct make_unsigned<const signed long> { typedef const unsigned long type; };
+ template <> struct make_unsigned<signed long long> { typedef unsigned long long type; };
+ template <> struct make_unsigned<const signed long long> { typedef const unsigned long long type; };
+
+ #if (CHAR_MIN < 0) // If char is signed, we convert char to unsigned char. However, if char is unsigned then make_unsigned returns char itself and not unsigned char.
+ template <> struct make_unsigned<char> { typedef unsigned char type; };
+ template <> struct make_unsigned<const char> { typedef unsigned char type; };
+ #endif
+
+ #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
+		#if (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 2147483647)) // If wchar_t is a 32 bit signed value...
+ template<>
+ struct make_unsigned<wchar_t>
+ { typedef uint32_t type; };
+		#elif (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 32767)) // If wchar_t is a 16 bit signed value...
+ template<>
+ struct make_unsigned<wchar_t>
+ { typedef uint16_t type; };
+		#elif (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 127)) // If wchar_t is an 8 bit signed value...
+ template<>
+ struct make_unsigned<wchar_t>
+ { typedef uint8_t type; };
+ #endif
+ #endif
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ using make_unsigned_t = typename make_unsigned<T>::type;
+ #endif
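+
+	// Example usage (illustrative):
+	//     static_assert(eastl::is_same<eastl::make_unsigned<signed char>::type,   unsigned char>::value, "signed char becomes unsigned char");
+	//     static_assert(eastl::is_same<eastl::make_unsigned<unsigned long>::type, unsigned long>::value, "already-unsigned types are returned unchanged");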
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_unsigned
+ //
+ // This is not a C++11 type trait, and is here for backwards compatibility
+ // only. Use the C++11 make_unsigned type trait instead.
+ //
+ // Adds unsigned-ness to the given type.
+ // Modifies only integral values; has no effect on others.
+ // add_unsigned<int>::type is unsigned int
+ // add_unsigned<unsigned int>::type is unsigned int
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ template<class T>
+ struct add_unsigned : public make_unsigned<T>
+	{ typedef typename eastl::make_unsigned<T>::type type; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_pointer
+ //
+ // Remove pointer from a type.
+ //
+ // The remove_pointer transformation trait removes top-level indirection
+ // by pointer (if any) from the type to which it is applied. Pointers to
+ // members are not affected. For a given type T, remove_pointer<T*>::type
+ // is equivalent to T.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_remove_pointer_CONFORMANCE 1
+
+ template<typename T> struct remove_pointer { typedef T type; };
+ template<typename T> struct remove_pointer<T*> { typedef T type; };
+ template<typename T> struct remove_pointer<T* const> { typedef T type; };
+ template<typename T> struct remove_pointer<T* volatile> { typedef T type; };
+ template<typename T> struct remove_pointer<T* const volatile> { typedef T type; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ using remove_pointer_t = typename remove_pointer<T>::type;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_pointer
+ //
+ // Add pointer to a type.
+ // Provides the member typedef type which is the type T*. If T is a
+ // reference type, then type is a pointer to the referred type.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_add_pointer_CONFORMANCE 1
+
+ template<class T>
+ struct add_pointer { typedef typename eastl::remove_reference<T>::type* type; };
+
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class T>
+ using add_pointer_t = typename add_pointer<T>::type;
+ #endif
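+
+	// Example usage (illustrative):
+	//     static_assert(eastl::is_same<eastl::remove_pointer<int*>::type,       int>::value,  "top-level pointer is removed");
+	//     static_assert(eastl::is_same<eastl::remove_pointer<int* const>::type, int>::value,  "cv-qualification of the pointer itself is also removed");
+	//     static_assert(eastl::is_same<eastl::add_pointer<int&>::type,          int*>::value, "a reference yields a pointer to the referred type");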
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_extent
+ //
+ // The remove_extent transformation trait removes a dimension from an array.
+ // For a given non-array type T, remove_extent<T>::type is equivalent to T.
+ // For a given array type T[N], remove_extent<T[N]>::type is equivalent to T.
+ // For a given array type const T[N], remove_extent<const T[N]>::type is equivalent to const T.
+ // For example, given a multi-dimensional array type T[M][N], remove_extent<T[M][N]>::type is equivalent to T[N].
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_remove_extent_CONFORMANCE 1 // remove_extent is conforming.
+
+ template<class T> struct remove_extent { typedef T type; };
+ template<class T> struct remove_extent<T[]> { typedef T type; };
+ template<class T, size_t N> struct remove_extent<T[N]> { typedef T type; };
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename T>
+ using remove_extent_t = typename remove_extent<T>::type;
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_all_extents
+ //
+ // The remove_all_extents transformation trait removes all dimensions from an array.
+ // For a given non-array type T, remove_all_extents<T>::type is equivalent to T.
+ // For a given array type T[N], remove_all_extents<T[N]>::type is equivalent to T.
+ // For a given array type const T[N], remove_all_extents<const T[N]>::type is equivalent to const T.
+ // For example, given a multi-dimensional array type T[M][N], remove_all_extents<T[M][N]>::type is equivalent to T.
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_remove_all_extents_CONFORMANCE 1 // remove_all_extents is conforming.
+
+ template<typename T> struct remove_all_extents { typedef T type; };
+ template<typename T, size_t N> struct remove_all_extents<T[N]> { typedef typename eastl::remove_all_extents<T>::type type; };
+ template<typename T> struct remove_all_extents<T[]> { typedef typename eastl::remove_all_extents<T>::type type; };
+
+ #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ template <typename T>
+ using remove_all_extents_t = typename remove_all_extents<T>::type;
+ #endif
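+
+	// Example usage (illustrative):
+	//     static_assert(eastl::is_same<eastl::remove_extent<int[3][5]>::type,      int[5]>::value, "one dimension removed");
+	//     static_assert(eastl::is_same<eastl::remove_all_extents<int[3][5]>::type, int>::value,    "all dimensions removed");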
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // aligned_storage
+ //
+ // The aligned_storage transformation trait provides a type that is
+	// suitably aligned to store an object whose size does not exceed length
+	// and whose alignment is a divisor of alignment. When using aligned_storage,
+	// length must be non-zero, and alignment must be >= alignment_of<T>::value
+ // for some type T. We require the alignment value to be a power-of-two.
+ //
+ // GCC versions prior to 4.4 don't properly support this with stack-based
+ // variables. The EABase EA_ALIGN_MAX_AUTOMATIC define identifies the
+ // extent to which stack (automatic) variables can be aligned for the
+ // given compiler/platform combination.
+ //
+ // Example usage:
+	//     aligned_storage<sizeof(Widget), alignment_of<Widget>::value>::type widget;
+ // Widget* pWidget = new(&widget) Widget;
+ //
+ // aligned_storage<sizeof(Widget), 64>::type widgetAlignedTo64;
+ // Widget* pWidget = new(&widgetAlignedTo64) Widget;
+ //
+	//     aligned_storage<sizeof(Widget), alignment_of<Widget>::value>::type widgetArray[37];
+ // Widget* pWidgetArray = new(widgetArray) Widget[37];
+ ///////////////////////////////////////////////////////////////////////
+
+ #define EASTL_TYPE_TRAIT_aligned_storage_CONFORMANCE 1 // aligned_storage is conforming.
+
+ #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4008)
+ // New versions of GCC do not support using 'alignas' with a value greater than 128.
+ // However, this code using the GNU standard alignment attribute works properly.
+ template<size_t N, size_t Align = EASTL_ALIGN_OF(double)>
+ struct aligned_storage
+ {
+ struct type { unsigned char mCharData[N]; } EA_ALIGN(Align);
+ };
+ #elif (EABASE_VERSION_N >= 20040) && !defined(EA_COMPILER_NO_ALIGNAS) // If C++11 alignas is supported...
+ template<size_t N, size_t Align = EASTL_ALIGN_OF(double)>
+ struct aligned_storage
+ {
+ typedef struct {
+ alignas(Align) unsigned char mCharData[N];
+ } type;
+ };
+
+ #elif defined(EA_COMPILER_MSVC) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION < 4007)) || defined(EA_COMPILER_EDG) // At some point GCC fixed their attribute(align) to support non-literals, though it's not clear what version aside from being no later than 4.7 and no earlier than 4.2.
+		// Some compilers don't allow you to use EA_ALIGNED with anything but a numeric literal,
+		// so we can't use the simpler code like we do further below for other compilers. We support
+ // only up to so much of an alignment value here.
+ template<size_t N, size_t Align>
+ struct aligned_storage_helper { struct type{ unsigned char mCharData[N]; }; };
+
+ template<size_t N> struct aligned_storage_helper<N, 2> { struct EA_ALIGN( 2) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 4> { struct EA_ALIGN( 4) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 8> { struct EA_ALIGN( 8) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 16> { struct EA_ALIGN( 16) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 32> { struct EA_ALIGN( 32) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 64> { struct EA_ALIGN( 64) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 128> { struct EA_ALIGN( 128) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 256> { struct EA_ALIGN( 256) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 512> { struct EA_ALIGN( 512) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 1024> { struct EA_ALIGN(1024) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 2048> { struct EA_ALIGN(2048) type{ unsigned char mCharData[N]; }; };
+ template<size_t N> struct aligned_storage_helper<N, 4096> { struct EA_ALIGN(4096) type{ unsigned char mCharData[N]; }; };
+
+ template<size_t N, size_t Align = EASTL_ALIGN_OF(double)>
+ struct aligned_storage
+ {
+ typedef typename aligned_storage_helper<N, Align>::type type;
+ };
+
+ #else
+ template<size_t N, size_t Align = EASTL_ALIGN_OF(double)>
+ struct aligned_storage
+ {
+ union type
+ {
+ unsigned char mCharData[N];
+ struct EA_ALIGN(Align) mStruct{ };
+ };
+ };
+ #endif
+
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+		#define EASTL_ALIGNED_STORAGE_T(N, Align) typename eastl::aligned_storage<N, Align>::type
+ #else
+ template <size_t N, size_t Align = EASTL_ALIGN_OF(double)>
+ using aligned_storage_t = typename aligned_storage<N, Align>::type;
+ #define EASTL_ALIGNED_STORAGE_T(N, Align) eastl::aligned_storage_t<N, Align>
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // aligned_union
+ //
+ // The member typedef type shall be a POD type suitable for use as
+ // uninitialized storage for any object whose type is listed in Types;
+ // its size shall be at least Len. The static member alignment_value
+ // shall be an integral constant of type std::size_t whose value is
+ // the strictest alignment of all types listed in Types.
+	// Note that the resulting type is not a C/C++ union, but simply a memory
+	// block (of POD type) that can be used to placement-new an actual
+	// C/C++ union of the types. The actual union you declare can be a non-POD union.
+ //
+ // Example usage:
+ // union MyUnion {
+ // char c;
+ // int i;
+ // float f;
+ //
+ // MyUnion(float fValue) : f(fValue) {}
+ // };
+ //
+ // aligned_union<sizeof(MyUnion), char, int, float>::type myUnionStorage;
+ // MyUnion* pMyUnion = new(&myUnionStorage) MyUnion(21.4f);
+ // pMyUnion->i = 37;
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ #if defined(EA_COMPILER_NO_VARIADIC_TEMPLATES) || !EASTL_TYPE_TRAIT_static_max_CONFORMANCE
+		#define EASTL_TYPE_TRAIT_aligned_union_CONFORMANCE 0 // aligned_union is not conforming, as it supports only up to four member types.
+
+ // To consider: Expand this to include more possible types. We may want to convert this to be a recursive
+ // template instead of like below.
+ template <size_t minSize, typename Type0, typename Type1 = char, typename Type2 = char, typename Type3 = char>
+ struct aligned_union
+ {
+ static const size_t size0 = eastl::static_max<minSize, sizeof(Type0)>::value;
+ static const size_t size1 = eastl::static_max<size0, sizeof(Type1)>::value;
+ static const size_t size2 = eastl::static_max<size1, sizeof(Type2)>::value;
+ static const size_t size = eastl::static_max<size2, sizeof(Type3)>::value;
+
+ static const size_t alignment0 = eastl::static_max<EA_ALIGN_OF(Type0), EA_ALIGN_OF(Type1)>::value;
+ static const size_t alignment1 = eastl::static_max<alignment0, EA_ALIGN_OF(Type2)>::value;
+ static const size_t alignment_value = eastl::static_max<alignment1, EA_ALIGN_OF(Type3)>::value;
+
+ typedef typename eastl::aligned_storage<size, alignment_value>::type type;
+ };
+
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ // To do: define macro.
+ #else
+ template <size_t minSize, typename Type0, typename Type1 = char, typename Type2 = char, typename Type3 = char>
+ using aligned_union_t = typename aligned_union<minSize, Type0, Type1, Type2, Type3>::type;
+ #endif
+ #else
+ #define EASTL_TYPE_TRAIT_aligned_union_CONFORMANCE 1 // aligned_union is conforming.
+
+ template <size_t minSize, typename Type0, typename ...TypeN>
+ struct aligned_union
+ {
+ static const size_t size = eastl::static_max<minSize, sizeof(Type0), sizeof(TypeN)...>::value;
+ static const size_t alignment_value = eastl::static_max<EA_ALIGN_OF(Type0), EA_ALIGN_OF(TypeN)...>::value;
+
+ typedef typename eastl::aligned_storage<size, alignment_value>::type type;
+ };
+
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES)
+ // To do: define macro.
+ #else
+ template <size_t minSize, typename... TypeN>
+ using aligned_union_t = typename aligned_union<minSize, TypeN...>::type;
+ #endif
+
+ #endif
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // union_cast
+ //
+ // Safely converts between unrelated types that have a binary equivalency.
+	// This approach is required by strictly conforming C++ compilers because
+ // directly using a C or C++ cast between unrelated types is fraught with
+ // the possibility of undefined runtime behavior due to type aliasing.
+ // The Source and Dest types must be POD types due to the use of a union
+ // in C++ versions prior to C++11. C++11 relaxes the definition of a POD
+	// such that it allows classes with trivial default constructors whereas
+ // previous versions did not, so beware of this when writing portable code.
+ //
+ // Example usage:
+ // float f32 = 1.234f;
+ // uint32_t n32 = union_cast<uint32_t>(f32);
+ //
+ // Example possible mis-usage:
+ // The following is valid only if you are aliasing the pointer value and
+ // not what it points to. Most of the time the user intends the latter,
+ // which isn't strictly possible.
+ // Widget* pWidget = CreateWidget();
+ // Foo* pFoo = union_cast<Foo*>(pWidget);
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename DestType, typename SourceType>
+ DestType union_cast(SourceType sourceValue)
+ {
+ EASTL_CT_ASSERT((sizeof(DestType) == sizeof(SourceType)) &&
+		                (EA_ALIGN_OF(DestType) == EA_ALIGN_OF(SourceType))); // To support differing alignments, we would need to use a memcpy-based solution or find a way to make the two union members align with each other.
+		//EASTL_CT_ASSERT(is_pod<DestType>::value && is_pod<SourceType>::value); // Disabled because we don't want to restrict what the user can do, as some compilers' definitions of is_pod aren't up to C++11 Standards.
+		//EASTL_CT_ASSERT(!is_pointer<DestType>::value && !is_pointer<SourceType>::value); // Disabled because it's valid to alias pointers as long as you are aliasing the pointer value and not what it points to.
+
+ union {
+ SourceType sourceValue;
+ DestType destValue;
+ } u;
+ u.sourceValue = sourceValue;
+
+ return u.destValue;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // void_t
+ //
+ // Maps a sequence of any types to void. This utility class is used in
+ // template meta programming to simplify compile time reflection mechanisms
+ // required by the standard library.
+ //
+ // http://en.cppreference.com/w/cpp/types/void_t
+ //
+ // Example:
+ // template <typename T, typename = void>
+ // struct is_iterable : false_type {};
+ //
+ // template <typename T>
+ // struct is_iterable<T, void_t<decltype(declval<T>().begin()),
+ // decltype(declval<T>().end())>> : true_type {};
+ //
+ ///////////////////////////////////////////////////////////////////////
+ #if EASTL_VARIABLE_TEMPLATES_ENABLED
+ template <class...>
+ using void_t = void;
+ #endif
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+